hip_filename | hip_content | cuda_filename | cuda_content
---|---|---|---|
b58b9ee985cff8be90be4c5b044f41f3eac80c36.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "InputOutput.h"
#include "precondConjugateGradientSparse.cuh"
#include "Utilities.cuh"
using namespace std;
#define REAL_TYPE double
/********/
/* MAIN */
/********/
int main() {
const int nnz = 54924;
const int Nrows = 7900;
const int Ncols = 7900;
cout << "CUDA CG Solver test." << endl << endl;
cout << "Reading in matrix" << endl;
int *h_rowIndices_CSR = (int *) malloc((Nrows + 1) * sizeof(int));
int *h_colIndices_CSR = (int *) malloc(nnz * sizeof(int));
REAL_TYPE *h_AValues_CSR = (REAL_TYPE *) malloc(nnz * sizeof(REAL_TYPE));
loadCPUrealtxt(h_AValues_CSR, "D:\\sparsePreconditionedCG\\sparsePreconditionedCG_Approach1\\values.txt", nnz);
loadCPUrealtxt(h_colIndices_CSR, "D:\\sparsePreconditionedCG\\sparsePreconditionedCG_Approach1\\colIndices.txt", nnz);
loadCPUrealtxt(h_rowIndices_CSR, "D:\\sparsePreconditionedCG\\sparsePreconditionedCG_Approach1\\rowIndices.txt", Nrows + 1);
// --- Corrects infinites for single precision case
for (int k = 0; k < nnz; k++) if (isinf(h_AValues_CSR[k])) h_AValues_CSR[k] = FLT_MAX;
cout << "Reading in RHS vector" << endl;
REAL_TYPE *h_b = (REAL_TYPE *)malloc(Nrows * sizeof(REAL_TYPE));
loadCPUrealtxt(h_b, "D:\\sparsePreconditionedCG\\sparsePreconditionedCG_Approach1\\b.txt", Nrows);
cout << "Reading in reference solution vector" << endl;
REAL_TYPE *x = (REAL_TYPE *)malloc(Ncols * sizeof(REAL_TYPE));
loadCPUrealtxt(x, "D:\\sparsePreconditionedCG\\sparsePreconditionedCG_Approach1\\x.txt", Ncols);
cout << "Calling solver" << endl;
REAL_TYPE *h_res = (REAL_TYPE *)malloc(Nrows * sizeof(REAL_TYPE));
// --- Allocate space for the CSR matrix and host -> device memory copy
REAL_TYPE *d_AValues_CSR; gpuErrchk(hipMalloc((void **)&d_AValues_CSR, sizeof(REAL_TYPE) * nnz));
gpuErrchk(hipMemcpy(d_AValues_CSR, h_AValues_CSR, sizeof(REAL_TYPE) * nnz, hipMemcpyHostToDevice));
int *d_rowIndices_CSR; gpuErrchk(hipMalloc((void **)&d_rowIndices_CSR, sizeof(int) * (Nrows + 1)));
gpuErrchk(hipMemcpy(d_rowIndices_CSR, h_rowIndices_CSR, sizeof(int) * (Nrows + 1), hipMemcpyHostToDevice));
int *d_colIndices_CSR; gpuErrchk(hipMalloc((void **)&d_colIndices_CSR, sizeof(int) * nnz));
gpuErrchk(hipMemcpy(d_colIndices_CSR, h_colIndices_CSR, sizeof(int) * nnz, hipMemcpyHostToDevice));
// --- Moves the rhs from host to device
REAL_TYPE *d_b; gpuErrchk(hipMalloc((void **)&d_b, sizeof(REAL_TYPE) * Ncols));
gpuErrchk(hipMemcpy(d_b, h_b, sizeof(REAL_TYPE) * Ncols, hipMemcpyHostToDevice));
// --- Allocate space for result vector on the device
REAL_TYPE *d_x; gpuErrchk(hipMalloc((void **)&d_x, sizeof(REAL_TYPE) * Ncols));
int iterations;
precondConjugateGradientSparse(d_rowIndices_CSR, Nrows + 1, d_colIndices_CSR, d_AValues_CSR, nnz, d_b, Nrows, d_x, 1, iterations);
printf("Iterations: %d \n", iterations);
// --- Copy result back to the host
gpuErrchk(hipMemcpy(h_res, d_x, sizeof(REAL_TYPE) * Ncols, hipMemcpyDeviceToHost));
REAL_TYPE l2norm = h_l2_norm(h_res, x, Ncols);
cout << "L2 Norm is " << l2norm << endl;
}
| b58b9ee985cff8be90be4c5b044f41f3eac80c36.cu | #include <iostream>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include "InputOutput.h"
#include "precondConjugateGradientSparse.cuh"
#include "Utilities.cuh"
using namespace std;
#define REAL_TYPE double
/********/
/* MAIN */
/********/
int main() {
const int nnz = 54924;
const int Nrows = 7900;
const int Ncols = 7900;
cout << "CUDA CG Solver test." << endl << endl;
cout << "Reading in matrix" << endl;
int *h_rowIndices_CSR = (int *) malloc((Nrows + 1) * sizeof(int));
int *h_colIndices_CSR = (int *) malloc(nnz * sizeof(int));
REAL_TYPE *h_AValues_CSR = (REAL_TYPE *) malloc(nnz * sizeof(REAL_TYPE));
loadCPUrealtxt(h_AValues_CSR, "D:\\sparsePreconditionedCG\\sparsePreconditionedCG_Approach1\\values.txt", nnz);
loadCPUrealtxt(h_colIndices_CSR, "D:\\sparsePreconditionedCG\\sparsePreconditionedCG_Approach1\\colIndices.txt", nnz);
loadCPUrealtxt(h_rowIndices_CSR, "D:\\sparsePreconditionedCG\\sparsePreconditionedCG_Approach1\\rowIndices.txt", Nrows + 1);
// --- Corrects infinites for single precision case
for (int k = 0; k < nnz; k++) if (isinf(h_AValues_CSR[k])) h_AValues_CSR[k] = FLT_MAX;
cout << "Reading in RHS vector" << endl;
REAL_TYPE *h_b = (REAL_TYPE *)malloc(Nrows * sizeof(REAL_TYPE));
loadCPUrealtxt(h_b, "D:\\sparsePreconditionedCG\\sparsePreconditionedCG_Approach1\\b.txt", Nrows);
cout << "Reading in reference solution vector" << endl;
REAL_TYPE *x = (REAL_TYPE *)malloc(Ncols * sizeof(REAL_TYPE));
loadCPUrealtxt(x, "D:\\sparsePreconditionedCG\\sparsePreconditionedCG_Approach1\\x.txt", Ncols);
cout << "Calling solver" << endl;
REAL_TYPE *h_res = (REAL_TYPE *)malloc(Nrows * sizeof(REAL_TYPE));
// --- Allocate space for the CSR matrix and host -> device memory copy
REAL_TYPE *d_AValues_CSR; gpuErrchk(cudaMalloc((void **)&d_AValues_CSR, sizeof(REAL_TYPE) * nnz));
gpuErrchk(cudaMemcpy(d_AValues_CSR, h_AValues_CSR, sizeof(REAL_TYPE) * nnz, cudaMemcpyHostToDevice));
int *d_rowIndices_CSR; gpuErrchk(cudaMalloc((void **)&d_rowIndices_CSR, sizeof(int) * (Nrows + 1)));
gpuErrchk(cudaMemcpy(d_rowIndices_CSR, h_rowIndices_CSR, sizeof(int) * (Nrows + 1), cudaMemcpyHostToDevice));
int *d_colIndices_CSR; gpuErrchk(cudaMalloc((void **)&d_colIndices_CSR, sizeof(int) * nnz));
gpuErrchk(cudaMemcpy(d_colIndices_CSR, h_colIndices_CSR, sizeof(int) * nnz, cudaMemcpyHostToDevice));
// --- Moves the rhs from host to device
REAL_TYPE *d_b; gpuErrchk(cudaMalloc((void **)&d_b, sizeof(REAL_TYPE) * Ncols));
gpuErrchk(cudaMemcpy(d_b, h_b, sizeof(REAL_TYPE) * Ncols, cudaMemcpyHostToDevice));
// --- Allocate space for result vector on the device
REAL_TYPE *d_x; gpuErrchk(cudaMalloc((void **)&d_x, sizeof(REAL_TYPE) * Ncols));
int iterations;
precondConjugateGradientSparse(d_rowIndices_CSR, Nrows + 1, d_colIndices_CSR, d_AValues_CSR, nnz, d_b, Nrows, d_x, 1, iterations);
printf("Iterations: %d \n", iterations);
// --- Copy result back to the host
gpuErrchk(cudaMemcpy(h_res, d_x, sizeof(REAL_TYPE) * Ncols, cudaMemcpyDeviceToHost));
REAL_TYPE l2norm = h_l2_norm(h_res, x, Ncols);
cout << "L2 Norm is " << l2norm << endl;
}
|
e47a106ec7421a96439e71aae266e3269c083b74.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <fstream>
#include <iostream>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define Infinity 65536 /* pow (2, 16) */
/*
randdouble()
Retorna um numero (double) aleatorio entre 0.0f e 1.0f.
Parametros:
Saida:
numero aleatorio entre 0.0 e 1.0.
*/
#define randdouble() ((double)rand()/(double)RAND_MAX)
/*
randomize()
Atualiza o gerador de numeros pseudo-aletatorios.
Parametros:
Saida:
*/
#define randomize() srand((unsigned)time(NULL))
/*
index()
Mapeia uma posicao de uma matriz (2D) para um indice de um vetor (1D).
Parametros:
length: numero de colunas da matriz
line: indice da linha
column: indice da coluna
Saída:
indice mapeado
*/
#define index(length,line,column) (column + line * length)
using namespace std;
const int NUMBER_OF_ITERATIONS = 100;
const double INIT_PHEROMONE_AMOUNT = 1.0;
const double EVAPORATION_RATE = 0.5;
const double ALFA = 1; /* Influencia da trilha de feromonios */
const double BETA = 2; /* Influencia da informacao heuristica */
int threads ( int n_ants );
int thread_per_block ( int n_ants );
/*
load_instance()
Inicializa uma instancia ( numero de cidades e matriz de distancias ) do TSP.
Parametros:
filename: nome do arquivo
n_cities: numero de cidades ( passagem por referencia )
Saida:
matriz de distancias ( distancias euclidianas )
*/
int *load_instance ( char const *filename, int &n_cities );
/*
calculate_pathcost()
Calcula o custo (soma dos custos de todas as arestas) de um caminho .
Parametros:
distances: matriz de distancias
path: caminho ( solucao )
n_cities: numero de cidades
Saida:
custo ( soma de todas as distancias ) do caminho
*/
int calculate_pathcost ( int *distances, int *path, int n_cities );
/*
best_solution()
Retorna a melhor entre as solucoes geradas.
Parametros:
ants: matriz de solucoes
distances: matriz de distancias ( entre cidades )
n_ants: numero de formigas
n_cities: numero de cidades
Saida:
melhor solucao encontrada
*/
int *best_solution ( int *tours, int *distances, int n_ants, int n_cities );
/*
evaporate()
Atualiza a matriz de feromonios aplicando evaporacao.
Para cada vertice, multiplica-se a taxa de evaporacao ( EVAPORATION_RATE ).
Parametros:
pheromones: matriz de feromonios
n_cities: numero de cidades
Saida:
matriz de feromonios atualizada
*/
void evaporate ( double *pheromones, int n_cities );
/*
reinforce()
Atualiza a matriz de feromonios.
Para cada vertice da melhor solucao corrente, adiciona-se uma quantidade de feromonios.
Parametros:
pheromones: matriz de feromonios
distances: matriz de distancias
min_path: caminho minimo ( melhor solucao ) encontrado
n_cities: numero de cidades
Saida:
matriz de feromonios atualizada
*/
void reinforce ( double *pheromones, int *distances, int *min_path, int n_cities );
/*
run()
Executa o algoritmo de colonia de formigas
Parametros:
distances: matriz de distancias
n_cities: numero de cidades
n_ants: numero de formigas
Saida:
melhor entre as solucoes encontradas por todas as formigas
*/
int *run ( int *distances, int n_cities, int n_ants );
__global__ void cuda_evaporate ( double *pheromones, int n_cities, double evap_rate );
__global__ void cuda_reinforce ( double *pheromones, int *distances, int *path, int n_cities, double amount );
__global__ void cuda_construct_tour (int *tours, int *visited, double *choiceinfo, double *probs, int n_cities );
int main ( int argc, char *argv[] ) {
randomize();
char const *inputname, *outputname;
if ( argc < 2 ) {
cout << "Missing input arguments!" << endl;
cout << "Program " << argv[0] << " takes exactly 3 arguments." << endl;
return 1;
}
if ( argc > 3 ) {
cout << "Too many arguments in program " << argv[0] << "!" << endl;
cout << "It takes exactly 3 arguments." << endl;
return 1;
}
cout << "Running " << argv[0] << " with arguments: ";
for (int i = 1; i < argc; i++)
cout << argv[i] << " ";
cout << endl;
inputname = argv[1];
if ( !argv[2] ) {
outputname = "results/output.txt";
} else {
outputname = argv[2];
}
int n_cities; /* Numero de cidades */
int *distances; /* Matriz de distancias (distancia euclidiana) */
/*
Inicializa a instancia.
Executa o algoritmo e calcula do custo da solucao.
*/
distances = load_instance ( inputname, n_cities );
int *solution = run ( distances, n_cities, threads ( n_cities ) );
int cost = calculate_pathcost ( distances, solution, n_cities );
cout << "Writing results in file " << outputname << "!\n";
ofstream output;
output.open(outputname);
output << "Custo: " << cost << endl;
output << "Melhor solucao encontrada:\n";
for(int i=0; i<n_cities; i++)
output << solution[i] << endl;
cout << argv[0] << " exited with no errors.";
return 0;
}
__global__ void cuda_evaporate ( double *pheromones, int n_cities, double evap_rate ) {
int edge_id = threadIdx.x + blockIdx.x * blockDim.x;
pheromones[ edge_id ] *= evap_rate;
}
__global__ void cuda_reinforce ( double *pheromones, int *distances, int *path, int n_cities, double amount ) {
int col_id = threadIdx.x + blockIdx.x * blockDim.x;
int origin = path[col_id];
int dest = path[col_id+1];
pheromones[ index( n_cities, origin, dest ) ] += amount;
pheromones[ index( n_cities, dest, origin ) ] += amount;
}
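// One thread per ant: each thread builds a complete tour step by step, picking the next city by roulette-wheel selection over the unvisited cities, weighted by the precomputed choiceinfo values.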
__global__ void cuda_construct_tour (int *tours, int *visited, double *choiceinfo, double *probs, int n_cities ) {
int line_id = blockDim.x * blockIdx.x + threadIdx.x;
for (int step = 1; step < n_cities; step++) {
int current = tours[ index ( n_cities, line_id, step - 1 ) ];
double sum_probs = 0.0;
for(int i = 0; i < n_cities; i++) {
if ( visited[ index ( n_cities, line_id, i) ] == 1 )
probs[ index ( n_cities, line_id, i ) ] = 0.0;
else {
double current_prob = choiceinfo[ index( n_cities, current, i ) ];
probs[ index ( n_cities, line_id, i ) ] = current_prob;
sum_probs += current_prob;
}
}
double random;
hiprandState_t state;
hiprand_init ( (unsigned long long) clock(), 0, 0, &state );
random = hiprand_uniform ( &state );
random *= sum_probs;
int next;
double sum = probs[ index ( n_cities, line_id, 0 ) ];
for(next = 0; sum < random; next++) {
sum += probs[ index ( n_cities, line_id, next + 1 ) ];
}
tours[ index ( n_cities, line_id, step ) ] = next;
visited[ index ( n_cities, line_id, next) ] = 1;
}
}
int threads ( int n_ants ) {
int n_threds = 1;
while ( n_threds * 2 < n_ants ) {
n_threds *= 2;
}
return n_threds;
}
int thread_per_block ( int n_ants ) {
int blocks = log(n_ants);
return pow (2, blocks);
}
int *load_instance ( char const *filename, int &n_cities ) {
cout << "Opening file " << filename << endl;
ifstream tsp;
tsp.open (filename);
/*if ( ifstream == NULL ) {
cout << "File " << filename << " not found!\n";
exit(1);
}*/
tsp >> n_cities;
int* distances = (int *) malloc ( n_cities * n_cities * sizeof(int) );
for (int i = 0; i < n_cities; i++)
for (int j = 0; j < n_cities; j++)
tsp >> distances[ index(n_cities, i, j) ];
return distances;
}
int calculate_pathcost ( int *distances, int *path, int n_cities ) {
int cost = 0;
for (int i = 0; i < (n_cities - 1); i++)
cost += distances[ index(n_cities, path[i], path[i+1]) ];
return cost;
}
int *best_solution ( int *tours, int *distances, int n_ants, int n_cities ) {
int *best_tour = &tours[0];
for (int tour = 0; tour < n_ants; tour++)
if (calculate_pathcost(distances, &tours[index(n_cities, tour, 0)], n_cities) < calculate_pathcost(distances, best_tour, n_cities))
best_tour = &tours[index(n_cities, tour, 0)];
return best_tour;
}
void evaporate ( double *pheromones, int n_cities ) {
int size = n_cities * n_cities * sizeof(double);
double *pheromones_device;
hipMalloc ( (void**) &pheromones_device, size);
hipMemcpy (pheromones_device, pheromones, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cuda_evaporate) , dim3(n_cities), dim3(n_cities) , 0, 0, pheromones_device, n_cities, EVAPORATION_RATE );
hipMemcpy (pheromones, pheromones_device, size, hipMemcpyDeviceToHost);
hipFree (pheromones_device);
}
void reinforce ( double *pheromones, int *distances, int *path, int n_cities ) {
double amount = (double) ( 1.0f / (double) calculate_pathcost ( distances, path, n_cities ) );
int size_path = n_cities * sizeof(int);
int size_int = n_cities * n_cities * sizeof(int);
int size_double = n_cities * n_cities * sizeof(double);
int *path_device;
int *distances_device;
double *pheromones_device;
hipMalloc((void**)&path_device, size_path);
hipMalloc((void**)&distances_device, size_int);
hipMalloc((void**)&pheromones_device, size_double);
hipMemcpy (path_device, path, size_path, hipMemcpyHostToDevice);
hipMemcpy (distances_device, distances, size_int, hipMemcpyHostToDevice);
hipMemcpy (pheromones_device, pheromones, size_double, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cuda_reinforce) , dim3(1), dim3(n_cities - 1) , 0, 0, pheromones_device, distances_device, path_device, n_cities, amount);
hipMemcpy (distances, distances_device, size_int, hipMemcpyDeviceToHost);
hipMemcpy (pheromones, pheromones_device, size_double, hipMemcpyDeviceToHost);
hipFree (path_device);
hipFree (distances_device);
hipFree (pheromones_device);
}
int *run ( int *distances, int n_cities, int n_ants) {
int ph_size = n_cities * n_cities * sizeof(double);
int tours_size = n_ants * n_cities * sizeof(int);
int dist_size = n_cities * n_cities * sizeof(int);
double *pheromones = (double*) malloc ( ph_size );
int *tours = (int*) malloc ( tours_size ); /* Solucoes */
int *visited = (int*) malloc ( tours_size ); /* Lista de cidades visitadas */
double *choiceinfo = (double*) malloc ( ph_size );
int *distances_device; /* Copia da GPU da matriz de distancias */
int *tours_device; /* Copia da GPU da matriz de solucoes */
int *visited_device; /* Copia da GPU da matriz de cidades visitadas */
double *choiceinfo_device; /* Copia da GPU da matriz de probabilidades (numeraodor) */
double *probs; /* Matriz de probabilidades */
hipMalloc ( (void**) &distances_device, dist_size );
hipMalloc ( (void**) &tours_device, tours_size );
hipMalloc ( (void**) &visited_device, tours_size );
hipMalloc ( (void**) &choiceinfo_device, ph_size );
hipMalloc ( (void**) &probs, ph_size);
hipMemcpy ( distances_device, distances, dist_size, hipMemcpyHostToDevice );
/*
Instancia-se a matriz de feromonios.
Inicialmente, todas as arestas possuem a mesma quantidade de feromonios ( INIT_PHEROMONE_AMOUNT ).
*/
for (int i = 0; i < n_cities; i++)
for (int j = 0; j < n_cities; j++)
pheromones[ index(n_cities, i, j) ] = INIT_PHEROMONE_AMOUNT;
for (int iteration = 0; iteration < NUMBER_OF_ITERATIONS; iteration++) {
/*
Reseta todos os caminhos ao inicio de cada iteracao.
Inicialmente, todas as posicoes encontram-se no infinito.
*/
for (int i = 0; i < n_ants; i++)
for (int j = 0; j < n_cities; j++)
tours[ index(n_cities, i, j) ] = Infinity;
for (int i = 0; i < n_ants; i++)
for (int j = 0; j < n_cities; j++)
visited[ index(n_cities, i, j) ] = 0;
/*
Calcula o numerador da funcao de probabilidade.
Em cada iteracao, este valor eh o mesmo para cada formiga, o que encoraja sua execucao aqui,
aumentando o desempenho do algoritmo.
*/
for (int i = 0; i < n_cities; i++) {
for (int j = 0; j < n_cities; j++) {
double edge_pherom = pheromones[ index(n_cities, i, j) ];
double edge_weight = distances[index(n_cities, i, j) ];
double prob = 0.0f;
if ( edge_weight != 0.0f ) {
prob = pow ( edge_pherom, ALFA ) * pow ( (1/edge_weight), BETA );
} else {
prob = pow ( edge_pherom, ALFA ) * pow ( Infinity, BETA );
}
choiceinfo[index(n_cities, i, j)] = prob;
}
}
hipMemcpy ( choiceinfo_device, choiceinfo, ph_size, hipMemcpyHostToDevice );
for (int ant = 0; ant < n_ants; ant++) {
int step = 0;
/*
Uma cidade inicial eh selecionada aleatoriamente.
*/
int init = rand() % n_cities;
/*
Atualiza o tour ( para cada formiga ).
*/
tours [ index ( n_cities, ant, step ) ] = init;
/*
Atualiza a memoria da formiga.
*/
visited [ index ( n_cities, ant, init ) ] = 1;
}
hipMemcpy ( visited_device, visited, tours_size, hipMemcpyHostToDevice );
hipMemcpy ( tours_device, tours, tours_size, hipMemcpyHostToDevice );
int gridDim = n_ants / thread_per_block (n_ants);
int antsPerBlock = thread_per_block (n_ants);
hipLaunchKernelGGL(( cuda_construct_tour) , dim3(gridDim), dim3(antsPerBlock) , 0, 0, tours_device, visited_device, choiceinfo_device, probs, n_cities );
//cuda_construct_tour <<< 1, n_ants >>> ( tours_device, visited_device, choiceinfo_device, probs, n_cities );
hipMemcpy ( tours, tours_device, tours_size, hipMemcpyDeviceToHost );
hipMemcpy ( visited, visited_device, tours_size, hipMemcpyDeviceToHost );
evaporate ( pheromones, n_cities );
int *best = best_solution ( tours, distances, n_ants, n_cities );
reinforce ( pheromones, distances, best, n_cities );
}
hipFree ( distances_device );
hipFree ( tours_device );
hipFree ( visited_device );
hipFree ( choiceinfo_device );
hipFree ( probs );
int *best = best_solution ( tours, distances, n_ants, n_cities );
return best;
} | e47a106ec7421a96439e71aae266e3269c083b74.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <fstream>
#include <iostream>
#include <curand.h>
#include <curand_kernel.h>
#define Infinity 65536 /* pow (2, 16) */
/*
randdouble()
Retorna um numero (double) aleatorio entre 0.0f e 1.0f.
Parametros:
Saida:
numero aleatorio entre 0.0 e 1.0.
*/
#define randdouble() ((double)rand()/(double)RAND_MAX)
/*
randomize()
Atualiza o gerador de numeros pseudo-aletatorios.
Parametros:
Saida:
*/
#define randomize() srand((unsigned)time(NULL))
/*
index()
Mapeia uma posicao de uma matriz (2D) para um indice de um vetor (1D).
Parametros:
length: numero de colunas da matriz
line: indice da linha
column: indice da coluna
Saída:
indice mapeado
*/
#define index(length,line,column) (column + line * length)
using namespace std;
const int NUMBER_OF_ITERATIONS = 100;
const double INIT_PHEROMONE_AMOUNT = 1.0;
const double EVAPORATION_RATE = 0.5;
const double ALFA = 1; /* Influencia da trilha de feromonios */
const double BETA = 2; /* Influencia da informacao heuristica */
int threads ( int n_ants );
int thread_per_block ( int n_ants );
/*
load_instance()
Inicializa uma instancia ( numero de cidades e matriz de distancias ) do TSP.
Parametros:
filename: nome do arquivo
n_cities: numero de cidades ( passagem por referencia )
Saida:
matriz de distancias ( distancias euclidianas )
*/
int *load_instance ( char const *filename, int &n_cities );
/*
calculate_pathcost()
Calcula o custo (soma dos custos de todas as arestas) de um caminho .
Parametros:
distances: matriz de distancias
path: caminho ( solucao )
n_cities: numero de cidades
Saida:
custo ( soma de todas as distancias ) do caminho
*/
int calculate_pathcost ( int *distances, int *path, int n_cities );
/*
best_solution()
Retorna a melhor entre as solucoes geradas.
Parametros:
ants: matriz de solucoes
distances: matriz de distancias ( entre cidades )
n_ants: numero de formigas
n_cities: numero de cidades
Saida:
melhor solucao encontrada
*/
int *best_solution ( int *tours, int *distances, int n_ants, int n_cities );
/*
evaporate()
Atualiza a matriz de feromonios aplicando evaporacao.
Para cada vertice, multiplica-se a taxa de evaporacao ( EVAPORATION_RATE ).
Parametros:
pheromones: matriz de feromonios
n_cities: numero de cidades
Saida:
matriz de feromonios atualizada
*/
void evaporate ( double *pheromones, int n_cities );
/*
reinforce()
Atualiza a matriz de feromonios.
Para cada vertice da melhor solucao corrente, adiciona-se uma quantidade de feromonios.
Parametros:
pheromones: matriz de feromonios
distances: matriz de distancias
min_path: caminho minimo ( melhor solucao ) encontrado
n_cities: numero de cidades
Saida:
matriz de feromonios atualizada
*/
void reinforce ( double *pheromones, int *distances, int *min_path, int n_cities );
/*
run()
Executa o algoritmo de colonia de formigas
Parametros:
distances: matriz de distancias
n_cities: numero de cidades
n_ants: numero de formigas
Saida:
melhor entre as solucoes encontradas por todas as formigas
*/
int *run ( int *distances, int n_cities, int n_ants );
__global__ void cuda_evaporate ( double *pheromones, int n_cities, double evap_rate );
__global__ void cuda_reinforce ( double *pheromones, int *distances, int *path, int n_cities, double amount );
__global__ void cuda_construct_tour (int *tours, int *visited, double *choiceinfo, double *probs, int n_cities );
int main ( int argc, char *argv[] ) {
randomize();
char const *inputname, *outputname;
if ( argc < 2 ) {
cout << "Missing input arguments!" << endl;
cout << "Program " << argv[0] << " takes exactly 3 arguments." << endl;
return 1;
}
if ( argc > 3 ) {
cout << "Too many arguments in program " << argv[0] << "!" << endl;
cout << "It takes exactly 3 arguments." << endl;
return 1;
}
cout << "Running " << argv[0] << " with arguments: ";
for (int i = 1; i < argc; i++)
cout << argv[i] << " ";
cout << endl;
inputname = argv[1];
if ( !argv[2] ) {
outputname = "results/output.txt";
} else {
outputname = argv[2];
}
int n_cities; /* Numero de cidades */
int *distances; /* Matriz de distancias (distancia euclidiana) */
/*
Inicializa a instancia.
Executa o algoritmo e calcula do custo da solucao.
*/
distances = load_instance ( inputname, n_cities );
int *solution = run ( distances, n_cities, threads ( n_cities ) );
int cost = calculate_pathcost ( distances, solution, n_cities );
cout << "Writing results in file " << outputname << "!\n";
ofstream output;
output.open(outputname);
output << "Custo: " << cost << endl;
output << "Melhor solucao encontrada:\n";
for(int i=0; i<n_cities; i++)
output << solution[i] << endl;
cout << argv[0] << " exited with no errors.";
return 0;
}
__global__ void cuda_evaporate ( double *pheromones, int n_cities, double evap_rate ) {
int edge_id = threadIdx.x + blockIdx.x * blockDim.x;
pheromones[ edge_id ] *= evap_rate;
}
__global__ void cuda_reinforce ( double *pheromones, int *distances, int *path, int n_cities, double amount ) {
int col_id = threadIdx.x + blockIdx.x * blockDim.x;
int origin = path[col_id];
int dest = path[col_id+1];
pheromones[ index( n_cities, origin, dest ) ] += amount;
pheromones[ index( n_cities, dest, origin ) ] += amount;
}
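// One thread per ant: each thread builds a complete tour step by step, picking the next city by roulette-wheel selection over the unvisited cities, weighted by the precomputed choiceinfo values.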
__global__ void cuda_construct_tour (int *tours, int *visited, double *choiceinfo, double *probs, int n_cities ) {
int line_id = blockDim.x * blockIdx.x + threadIdx.x;
for (int step = 1; step < n_cities; step++) {
int current = tours[ index ( n_cities, line_id, step - 1 ) ];
double sum_probs = 0.0;
for(int i = 0; i < n_cities; i++) {
if ( visited[ index ( n_cities, line_id, i) ] == 1 )
probs[ index ( n_cities, line_id, i ) ] = 0.0;
else {
double current_prob = choiceinfo[ index( n_cities, current, i ) ];
probs[ index ( n_cities, line_id, i ) ] = current_prob;
sum_probs += current_prob;
}
}
double random;
curandState_t state;
curand_init ( (unsigned long long) clock(), 0, 0, &state );
random = curand_uniform ( &state );
random *= sum_probs;
int next;
double sum = probs[ index ( n_cities, line_id, 0 ) ];
for(next = 0; sum < random; next++) {
sum += probs[ index ( n_cities, line_id, next + 1 ) ];
}
tours[ index ( n_cities, line_id, step ) ] = next;
visited[ index ( n_cities, line_id, next) ] = 1;
}
}
int threads ( int n_ants ) {
int n_threds = 1;
while ( n_threds * 2 < n_ants ) {
n_threds *= 2;
}
return n_threds;
}
int thread_per_block ( int n_ants ) {
int blocks = log(n_ants);
return pow (2, blocks);
}
int *load_instance ( char const *filename, int &n_cities ) {
cout << "Opening file " << filename << endl;
ifstream tsp;
tsp.open (filename);
/*if ( ifstream == NULL ) {
cout << "File " << filename << " not found!\n";
exit(1);
}*/
tsp >> n_cities;
int* distances = (int *) malloc ( n_cities * n_cities * sizeof(int) );
for (int i = 0; i < n_cities; i++)
for (int j = 0; j < n_cities; j++)
tsp >> distances[ index(n_cities, i, j) ];
return distances;
}
int calculate_pathcost ( int *distances, int *path, int n_cities ) {
int cost = 0;
for (int i = 0; i < (n_cities - 1); i++)
cost += distances[ index(n_cities, path[i], path[i+1]) ];
return cost;
}
int *best_solution ( int *tours, int *distances, int n_ants, int n_cities ) {
int *best_tour = &tours[0];
for (int tour = 0; tour < n_ants; tour++)
if (calculate_pathcost(distances, &tours[index(n_cities, tour, 0)], n_cities) < calculate_pathcost(distances, best_tour, n_cities))
best_tour = &tours[index(n_cities, tour, 0)];
return best_tour;
}
void evaporate ( double *pheromones, int n_cities ) {
int size = n_cities * n_cities * sizeof(double);
double *pheromones_device;
cudaMalloc ( (void**) &pheromones_device, size);
cudaMemcpy (pheromones_device, pheromones, size, cudaMemcpyHostToDevice);
cuda_evaporate <<< n_cities, n_cities >>> ( pheromones_device, n_cities, EVAPORATION_RATE );
cudaMemcpy (pheromones, pheromones_device, size, cudaMemcpyDeviceToHost);
cudaFree (pheromones_device);
}
void reinforce ( double *pheromones, int *distances, int *path, int n_cities ) {
double amount = (double) ( 1.0f / (double) calculate_pathcost ( distances, path, n_cities ) );
int size_path = n_cities * sizeof(int);
int size_int = n_cities * n_cities * sizeof(int);
int size_double = n_cities * n_cities * sizeof(double);
int *path_device;
int *distances_device;
double *pheromones_device;
cudaMalloc((void**)&path_device, size_path);
cudaMalloc((void**)&distances_device, size_int);
cudaMalloc((void**)&pheromones_device, size_double);
cudaMemcpy (path_device, path, size_path, cudaMemcpyHostToDevice);
cudaMemcpy (distances_device, distances, size_int, cudaMemcpyHostToDevice);
cudaMemcpy (pheromones_device, pheromones, size_double, cudaMemcpyHostToDevice);
cuda_reinforce <<< 1, n_cities - 1 >>> (pheromones_device, distances_device, path_device, n_cities, amount);
cudaMemcpy (distances, distances_device, size_int, cudaMemcpyDeviceToHost);
cudaMemcpy (pheromones, pheromones_device, size_double, cudaMemcpyDeviceToHost);
cudaFree (path_device);
cudaFree (distances_device);
cudaFree (pheromones_device);
}
int *run ( int *distances, int n_cities, int n_ants) {
int ph_size = n_cities * n_cities * sizeof(double);
int tours_size = n_ants * n_cities * sizeof(int);
int dist_size = n_cities * n_cities * sizeof(int);
double *pheromones = (double*) malloc ( ph_size );
int *tours = (int*) malloc ( tours_size ); /* Solucoes */
int *visited = (int*) malloc ( tours_size ); /* Lista de cidades visitadas */
double *choiceinfo = (double*) malloc ( ph_size );
int *distances_device; /* Copia da GPU da matriz de distancias */
int *tours_device; /* Copia da GPU da matriz de solucoes */
int *visited_device; /* Copia da GPU da matriz de cidades visitadas */
double *choiceinfo_device; /* Copia da GPU da matriz de probabilidades (numeraodor) */
double *probs; /* Matriz de probabilidades */
cudaMalloc ( (void**) &distances_device, dist_size );
cudaMalloc ( (void**) &tours_device, tours_size );
cudaMalloc ( (void**) &visited_device, tours_size );
cudaMalloc ( (void**) &choiceinfo_device, ph_size );
cudaMalloc ( (void**) &probs, ph_size);
cudaMemcpy ( distances_device, distances, dist_size, cudaMemcpyHostToDevice );
/*
Instancia-se a matriz de feromonios.
Inicialmente, todas as arestas possuem a mesma quantidade de feromonios ( INIT_PHEROMONE_AMOUNT ).
*/
for (int i = 0; i < n_cities; i++)
for (int j = 0; j < n_cities; j++)
pheromones[ index(n_cities, i, j) ] = INIT_PHEROMONE_AMOUNT;
for (int iteration = 0; iteration < NUMBER_OF_ITERATIONS; iteration++) {
/*
Reseta todos os caminhos ao inicio de cada iteracao.
Inicialmente, todas as posicoes encontram-se no infinito.
*/
for (int i = 0; i < n_ants; i++)
for (int j = 0; j < n_cities; j++)
tours[ index(n_cities, i, j) ] = Infinity;
for (int i = 0; i < n_ants; i++)
for (int j = 0; j < n_cities; j++)
visited[ index(n_cities, i, j) ] = 0;
/*
Calcula o numerador da funcao de probabilidade.
Em cada iteracao, este valor eh o mesmo para cada formiga, o que encoraja sua execucao aqui,
aumentando o desempenho do algoritmo.
*/
for (int i = 0; i < n_cities; i++) {
for (int j = 0; j < n_cities; j++) {
double edge_pherom = pheromones[ index(n_cities, i, j) ];
double edge_weight = distances[index(n_cities, i, j) ];
double prob = 0.0f;
if ( edge_weight != 0.0f ) {
prob = pow ( edge_pherom, ALFA ) * pow ( (1/edge_weight), BETA );
} else {
prob = pow ( edge_pherom, ALFA ) * pow ( Infinity, BETA );
}
choiceinfo[index(n_cities, i, j)] = prob;
}
}
cudaMemcpy ( choiceinfo_device, choiceinfo, ph_size, cudaMemcpyHostToDevice );
for (int ant = 0; ant < n_ants; ant++) {
int step = 0;
/*
Uma cidade inicial eh selecionada aleatoriamente.
*/
int init = rand() % n_cities;
/*
Atualiza o tour ( para cada formiga ).
*/
tours [ index ( n_cities, ant, step ) ] = init;
/*
Atualiza a memoria da formiga.
*/
visited [ index ( n_cities, ant, init ) ] = 1;
}
cudaMemcpy ( visited_device, visited, tours_size, cudaMemcpyHostToDevice );
cudaMemcpy ( tours_device, tours, tours_size, cudaMemcpyHostToDevice );
int gridDim = n_ants / thread_per_block (n_ants);
int antsPerBlock = thread_per_block (n_ants);
cuda_construct_tour <<< gridDim, antsPerBlock >>> ( tours_device, visited_device, choiceinfo_device, probs, n_cities );
//cuda_construct_tour <<< 1, n_ants >>> ( tours_device, visited_device, choiceinfo_device, probs, n_cities );
cudaMemcpy ( tours, tours_device, tours_size, cudaMemcpyDeviceToHost );
cudaMemcpy ( visited, visited_device, tours_size, cudaMemcpyDeviceToHost );
evaporate ( pheromones, n_cities );
int *best = best_solution ( tours, distances, n_ants, n_cities );
reinforce ( pheromones, distances, best, n_cities );
}
cudaFree ( distances_device );
cudaFree ( tours_device );
cudaFree ( visited_device );
cudaFree ( choiceinfo_device );
cudaFree ( probs );
int *best = best_solution ( tours, distances, n_ants, n_cities );
return best;
} |
31cf8f32d1e4ad23359806469b8ca85a185ca5f4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
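// Benchmark harness: for every matrix size and launch configuration, pad the problem to a multiple of the block dims, warm up with 10 launches, then time 1000 launches of kernel() and print the elapsed microseconds with the block and matrix dimensions.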
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
int *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
int *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
int *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, A, C, B, result);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, A, C, B, result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, A, C, B, result);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 31cf8f32d1e4ad23359806469b8ca85a185ca5f4.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
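// Benchmark harness: for every matrix size and launch configuration, pad the problem to a multiple of the block dims, warm up with 10 launches, then time 1000 launches of kernel() and print the elapsed microseconds with the block and matrix dimensions.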
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
int *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE);
int *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
int *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel<<<gridBlock,threadBlock>>>(A,C,B,result);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel<<<gridBlock,threadBlock>>>(A,C,B,result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel<<<gridBlock,threadBlock>>>(A,C,B,result);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
59c256ab5f24d82c23f1ee2fb65d5642ef2443f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
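// Element-wise vector addition demo: reads two integer vectors of length n from stdin, adds them on the GPU with one thread per element, and prints the result. Note: a, b and c are fixed-size arrays of 10 elements, so n is expected to be at most 10.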
__global__ void add(int *a,int *b,int *c)
{
int id=blockIdx.x*blockDim.x+threadIdx.x;
c[id]=a[id]+b[id];
}
int main()
{
int a[10],b[10],c[10],n;
printf("Enter n: ");
scanf("%d",&n);
printf("Enter A:\n");
for(int i=0;i<n;i++)
scanf("%d",&a[i]);
printf("Enter B:\n");
for(int i=0;i<n;i++)
scanf("%d",&b[i]);
int *d_a,*d_b,*d_c;
int size=sizeof(int)*n;
hipMalloc((void**)&d_a,size);
hipMalloc((void**)&d_b,size);
hipMalloc((void**)&d_c,size);
hipMemcpy(d_a,&a,size,hipMemcpyHostToDevice);
hipMemcpy(d_b,&b,size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(1),dim3(n), 0, 0, d_a,d_b,d_c);
hipMemcpy(&c,d_c,size,hipMemcpyDeviceToHost);
for(int i=0;i<n;i++)
printf("%d ",c[i]);
printf("\n");
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
| 59c256ab5f24d82c23f1ee2fb65d5642ef2443f9.cu | #include<stdio.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
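// Element-wise vector addition demo: reads two integer vectors of length n from stdin, adds them on the GPU with one thread per element, and prints the result. Note: a, b and c are fixed-size arrays of 10 elements, so n is expected to be at most 10.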
__global__ void add(int *a,int *b,int *c)
{
int id=blockIdx.x*blockDim.x+threadIdx.x;
c[id]=a[id]+b[id];
}
int main()
{
int a[10],b[10],c[10],n;
printf("Enter n: ");
scanf("%d",&n);
printf("Enter A:\n");
for(int i=0;i<n;i++)
scanf("%d",&a[i]);
printf("Enter B:\n");
for(int i=0;i<n;i++)
scanf("%d",&b[i]);
int *d_a,*d_b,*d_c;
int size=sizeof(int)*n;
cudaMalloc((void**)&d_a,size);
cudaMalloc((void**)&d_b,size);
cudaMalloc((void**)&d_c,size);
cudaMemcpy(d_a,&a,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,&b,size,cudaMemcpyHostToDevice);
add<<<1,n>>>(d_a,d_b,d_c);
cudaMemcpy(&c,d_c,size,cudaMemcpyDeviceToHost);
for(int i=0;i<n;i++)
printf("%d ",c[i]);
printf("\n");
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
|
9494df4b93ea6ce6d0089edd45395a600a455e3c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// CUDA kernels for image ridgeness
// Yuqiong Li
// VinSmart
// 09/25/2020
// http://eigen.tuxfamily.org/dox-devel/TopicCUDA.html
// Note: This is not working with Eigen 3.3 and NVCC 10.1 ! Since the functionality is experimental, it might not have been ported yet.
#include "cuda_kernels.h"
#define CUDA_CHECK_ERROR( err ) (cudaCheckError( err, __FILE__, __LINE__ ))
inline void cudaCheckError( hipError_t err, const char *file, int line )
{
// CUDA error handling from the "CUDA by example" book
if (err != hipSuccess)
{
printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
}
// Get index offset in a 2D matrix at position (i, j)
// row-major memory layout
__host__ __device__ int offset2D(int i, int j, int col) {
return i * col + j;
}
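// One thread per pixel: eigendecompose the 2x2 structure tensor at (i, j) and store the eigenvector associated with the larger eigenvalue as the dominant orientation (u, v).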
__global__ void dominantVectorKernel(Eigen::Matrix2f *tensor, float *dominant_u, float *dominant_v, int rows, int cols) {
int i = blockIdx.y * blockDim.y + threadIdx.y; // row id
int j = blockIdx.x * blockDim.x + threadIdx.x; // col id
if (i < rows && j < cols) {
int offset = offset2D(i, j, cols);
// get eigen values
Eigen::EigenSolver<Eigen::Matrix2f> eigen_solver;
eigen_solver.compute(tensor[offset]);
Eigen::Vector2f eigen_values = eigen_solver.eigenvalues().real();
Eigen::Matrix2f eigen_vectors = eigen_solver.eigenvectors().real();
// get dominant vector
int index = eigen_values(0) > eigen_values(1) ? 0 : 1;
auto dominant_vector = eigen_vectors.col(index);
// assign results to output buffer
dominant_u[offset] = dominant_vector(0);
dominant_v[offset] = dominant_vector(1);
}
return;
}
// wrapper for the dominantVectorKernel
std::vector<cv::Mat> cudaGetDominantVector(const std::vector<Eigen::Matrix2f> & tensor, int rows, int cols) {
int n = rows * cols;
// Allocate host arrays
float *h_dominant_u = new float[n]();
float *h_dominant_v = new float[n]();
// Allocate device arrays
Eigen::Matrix2f *d_tensor ;
CUDA_CHECK_ERROR(hipMalloc((void **)&d_tensor, sizeof(Eigen::Matrix2f) * n));
float *d_dominant_u;
float *d_dominant_v;
CUDA_CHECK_ERROR(hipMalloc((void **)&d_dominant_u, sizeof(float) * n));
CUDA_CHECK_ERROR(hipMalloc((void **)&d_dominant_v, sizeof(float) * n));
// Copy to device
CUDA_CHECK_ERROR(hipMemcpy(d_tensor, tensor.data(), sizeof(Eigen::Matrix2f) * n, hipMemcpyHostToDevice));
// Run kernel
dim3 blocksPerGrid(ceil(rows/32.0), ceil(cols/32.0), 1);
dim3 threadsPerBlock(32, 32, 1);
hipLaunchKernelGGL(( dominantVectorKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_tensor, d_dominant_u, d_dominant_v, rows, cols);
// Copy to host
CUDA_CHECK_ERROR(hipMemcpy(h_dominant_u, d_dominant_u, sizeof(float) * n, hipMemcpyDeviceToHost));
CUDA_CHECK_ERROR(hipMemcpy(h_dominant_v, d_dominant_v, sizeof(float) * n, hipMemcpyDeviceToHost));
cv::Mat dominant_u = cv::Mat(rows, cols, CV_32F, h_dominant_u);
cv::Mat dominant_v = cv::Mat(rows, cols, CV_32F, h_dominant_v);
// Free device memory
hipFree(d_tensor);
hipFree(d_dominant_u);
hipFree(d_dominant_v);
return {dominant_u, dominant_v};
}
| 9494df4b93ea6ce6d0089edd45395a600a455e3c.cu | // CUDA kernels for image ridgeness
// Yuqiong Li
// VinSmart
// 09/25/2020
// http://eigen.tuxfamily.org/dox-devel/TopicCUDA.html
// Note: This is not working with Eigen 3.3 and NVCC 10.1 ! Since the functionality is experimental, it might not have been ported yet.
#include "cuda_kernels.h"
#define CUDA_CHECK_ERROR( err ) (cudaCheckError( err, __FILE__, __LINE__ ))
inline void cudaCheckError( cudaError_t err, const char *file, int line )
{
// CUDA error handling from the "CUDA by example" book
if (err != cudaSuccess)
{
printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
}
// Get index offset in a 2D matrix at position (i, j)
// row-major memory layout
__host__ __device__ int offset2D(int i, int j, int col) {
return i * col + j;
}
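// One thread per pixel: eigendecompose the 2x2 structure tensor at (i, j) and store the eigenvector associated with the larger eigenvalue as the dominant orientation (u, v).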
__global__ void dominantVectorKernel(Eigen::Matrix2f *tensor, float *dominant_u, float *dominant_v, int rows, int cols) {
int i = blockIdx.y * blockDim.y + threadIdx.y; // row id
int j = blockIdx.x * blockDim.x + threadIdx.x; // col id
if (i < rows && j < cols) {
int offset = offset2D(i, j, cols);
// get eigen values
Eigen::EigenSolver<Eigen::Matrix2f> eigen_solver;
eigen_solver.compute(tensor[offset]);
Eigen::Vector2f eigen_values = eigen_solver.eigenvalues().real();
Eigen::Matrix2f eigen_vectors = eigen_solver.eigenvectors().real();
// get dominant vector
int index = eigen_values(0) > eigen_values(1) ? 0 : 1;
auto dominant_vector = eigen_vectors.col(index);
// assign results to output buffer
dominant_u[offset] = dominant_vector(0);
dominant_v[offset] = dominant_vector(1);
}
return;
}
// wrapper for the dominantVectorKernel
std::vector<cv::Mat> cudaGetDominantVector(const std::vector<Eigen::Matrix2f> & tensor, int rows, int cols) {
int n = rows * cols;
// Allocate host arrays
float *h_dominant_u = new float[n]();
float *h_dominant_v = new float[n]();
// Allocate device arrays
Eigen::Matrix2f *d_tensor ;
CUDA_CHECK_ERROR(cudaMalloc((void **)&d_tensor, sizeof(Eigen::Matrix2f) * n));
float *d_dominant_u;
float *d_dominant_v;
CUDA_CHECK_ERROR(cudaMalloc((void **)&d_dominant_u, sizeof(float) * n));
CUDA_CHECK_ERROR(cudaMalloc((void **)&d_dominant_v, sizeof(float) * n));
// Copy to device
CUDA_CHECK_ERROR(cudaMemcpy(d_tensor, tensor.data(), sizeof(Eigen::Matrix2f) * n, cudaMemcpyHostToDevice));
// Run kernel
dim3 blocksPerGrid(ceil(rows/32.0), ceil(cols/32.0), 1);
dim3 threadsPerBlock(32, 32, 1);
dominantVectorKernel<<<blocksPerGrid, threadsPerBlock>>> (d_tensor, d_dominant_u, d_dominant_v, rows, cols);
// Copy to host
CUDA_CHECK_ERROR(cudaMemcpy(h_dominant_u, d_dominant_u, sizeof(float) * n, cudaMemcpyDeviceToHost));
CUDA_CHECK_ERROR(cudaMemcpy(h_dominant_v, d_dominant_v, sizeof(float) * n, cudaMemcpyDeviceToHost));
cv::Mat dominant_u = cv::Mat(rows, cols, CV_32F, h_dominant_u);
cv::Mat dominant_v = cv::Mat(rows, cols, CV_32F, h_dominant_v);
// Free device memory
cudaFree(d_tensor);
cudaFree(d_dominant_u);
cudaFree(d_dominant_v);
return {dominant_u, dominant_v};
}
|
52adaff0e0698bc668f72c0502db1627cad51395.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include "egblas/equal.hpp"
#include "complex.hpp"
__device__ bool operator==(hipComplex lhs, hipComplex rhs){
return lhs.x == rhs.x && lhs.y == rhs.y;
}
__device__ bool operator==(hipDoubleComplex lhs, hipDoubleComplex rhs){
return lhs.x == rhs.x && lhs.y == rhs.y;
}
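// Grid-stride loop: each thread handles indices index, index + stride, ... and writes a[i * inca] == b[i * incb] into y[i * incy].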
template <typename T>
__global__ void equal_kernel(size_t n, const T* a, size_t inca, const T* b, size_t incb, bool* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = a[inca * index] == b[incb * index];
}
}
template <typename T>
void equal_kernel_run(size_t n, const T* a, size_t inca, const T* b, size_t incb, bool* y, size_t incy) {
int blockSize;
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, equal_kernel<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( equal_kernel<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, a, inca, b, incb, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
hipDeviceSynchronize();
#endif
}
void egblas_sequal(size_t n, const float* a, size_t inca, const float* b, size_t incb, bool* y, size_t incy) {
equal_kernel_run(n, a, inca, b, incb, y, incy);
}
void egblas_dequal(size_t n, const double* a, size_t inca, const double* b, size_t incb, bool* y, size_t incy) {
equal_kernel_run(n, a, inca, b, incb, y, incy);
}
void egblas_cequal(size_t n, const hipComplex* a, size_t inca, const hipComplex* b, size_t incb, bool* y, size_t incy) {
equal_kernel_run(n, a, inca, b, incb, y, incy);
}
void egblas_zequal(size_t n, const hipDoubleComplex* a, size_t inca, const hipDoubleComplex* b, size_t incb, bool* y, size_t incy) {
equal_kernel_run(n, a, inca, b, incb, y, incy);
}
| 52adaff0e0698bc668f72c0502db1627cad51395.cu | //=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include "egblas/equal.hpp"
#include "complex.hpp"
__device__ bool operator==(cuComplex lhs, cuComplex rhs){
return lhs.x == rhs.x && lhs.y == rhs.y;
}
__device__ bool operator==(cuDoubleComplex lhs, cuDoubleComplex rhs){
return lhs.x == rhs.x && lhs.y == rhs.y;
}
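// Grid-stride loop: each thread handles indices index, index + stride, ... and writes a[i * inca] == b[i * incb] into y[i * incy].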
template <typename T>
__global__ void equal_kernel(size_t n, const T* a, size_t inca, const T* b, size_t incb, bool* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = a[inca * index] == b[incb * index];
}
}
template <typename T>
void equal_kernel_run(size_t n, const T* a, size_t inca, const T* b, size_t incb, bool* y, size_t incy) {
int blockSize;
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, equal_kernel<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
equal_kernel<T><<<gridSize, blockSize>>>(n, a, inca, b, incb, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
cudaDeviceSynchronize();
#endif
}
void egblas_sequal(size_t n, const float* a, size_t inca, const float* b, size_t incb, bool* y, size_t incy) {
equal_kernel_run(n, a, inca, b, incb, y, incy);
}
void egblas_dequal(size_t n, const double* a, size_t inca, const double* b, size_t incb, bool* y, size_t incy) {
equal_kernel_run(n, a, inca, b, incb, y, incy);
}
void egblas_cequal(size_t n, const cuComplex* a, size_t inca, const cuComplex* b, size_t incb, bool* y, size_t incy) {
equal_kernel_run(n, a, inca, b, incb, y, incy);
}
void egblas_zequal(size_t n, const cuDoubleComplex* a, size_t inca, const cuDoubleComplex* b, size_t incb, bool* y, size_t incy) {
equal_kernel_run(n, a, inca, b, incb, y, incy);
}
|
ce120f82dc9312aeae2ce35ead31503abc548677.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "compute_scores.cuh"
#include <catboost/cuda/methods/kernel/score_calcers.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/random_gen.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <contrib/libs/cub/hipcub/hipcub.hpp>
#include <cmath>
#include <exception>
#include <cfloat>
namespace NKernel {
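// ARGMAX(): block-wide shared-memory reduction that keeps the (score, index) pair with the lowest score (ties broken toward the smaller index) and writes the winning bin feature into *result.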
#define ARGMAX() \
__shared__ float scores[BlockSize]; \
scores[tid] = bestScore; \
__shared__ int indices[BlockSize]; \
indices[tid] = bestIndex; \
__syncthreads();\
for (ui32 s = BlockSize >> 1; s > 0; s >>= 1) { \
if (tid < s) { \
if (scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { \
scores[tid] = scores[tid + s]; \
indices[tid] = indices[tid + s]; \
}\
}\
__syncthreads();\
} \
if (!tid) { \
const int index = indices[0];\
if (index != -1 && index < binFeatureCount) { \
result->FeatureId = bf[index].FeatureId;\
result->BinId = bf[index].BinId;\
result->Score = scores[0];\
} else {\
result->FeatureId = -1;\
result->BinId = -1;\
result->Score = FLT_MAX;\
}\
}
// histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex;
template <int BlockSize,
class TScoreCalcer>
__global__ void ComputeOptimalSplits(const TCBinFeature* bf,
ui32 binFeatureCount,
const float* binFeaturesWeights, ui32 binFeaturesWeightsCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds, int pCount,
const ui32* restPartIds, int restPartCount,
bool multiclassOptimization,
TScoreCalcer calcer,
TBestSplitProperties* result) {
float bestScore = FLT_MAX;
int bestIndex = -1;
int tid = threadIdx.x;
result += blockIdx.x + blockIdx.y * gridDim.x;
partIds += blockIdx.y * pCount;
for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) {
const int binFeatureId = offset + tid;
if (binFeatureId >= binFeatureCount) {
break;
}
if (bf[binFeatureId].SkipInScoreCount) {
continue;
}
calcer.NextFeature(bf[binFeatureId]);
for (int i = 0; i < pCount; i++) {
const int leafId = __ldg(partIds + i);
const float weightLeft = max(__ldg(histograms + leafId * statCount * binFeatureCount + binFeatureId), 0.0f);
const float weightRight = max(__ldg(partStats + leafId * statCount) - weightLeft, 0.0f);
double totalSumLeft = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
float sumLeft = __ldg(histograms + leafId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId);
double partStat = __ldg(partStats + leafId * statCount + statId);
totalSumPart += partStat;
float sumRight = static_cast<float>(partStat - sumLeft);
calcer.AddLeaf(sumLeft, weightLeft);
calcer.AddLeaf(sumRight, weightRight);
totalSumLeft += sumLeft;
}
if (multiclassOptimization) {
double totalSumRight = totalSumPart - totalSumLeft;
calcer.AddLeaf(-totalSumLeft, weightLeft);
calcer.AddLeaf(-totalSumRight, weightRight);
}
}
//add fixed leaves
for (int i = 0; i < restPartCount; i++) {
const int leafId = __ldg(restPartIds + i);
const float weight = max(__ldg(partStats + leafId * statCount), 0.0f);
double totalSum = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
double sum = __ldg(partStats + leafId * statCount + statId);
totalSumPart += sum;
calcer.AddLeaf(sum, weight);
totalSum += sum;
}
if (multiclassOptimization) {
calcer.AddLeaf(-totalSum, weight);
}
}
float score = calcer.GetScore();
ui32 featureId = bf[binFeatureId].FeatureId;
score *= __ldg(binFeaturesWeights + featureId);
if (score < bestScore) {
bestScore = score;
bestIndex = binFeatureId;
}
}
ARGMAX()
}
void ComputeOptimalSplits(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
const float* binFeaturesWeights, ui32 binFeaturesWeightsCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds, int partBlockSize, int partBlockCount,
const ui32* restPartIds, int restPartCount,
TBestSplitProperties* result, ui32 argmaxBlockCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
TCudaStream stream) {
const int blockSize = 128;
dim3 numBlocks;
numBlocks.x = argmaxBlockCount;
numBlocks.y = partBlockCount;
numBlocks.z = 1;
#define RUN() \
ComputeOptimalSplits<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, binFeaturesWeights, binFeaturesWeightsCount, histograms, partStats, statCount, partIds, partBlockSize, restPartIds, restPartCount, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
template <int BlockSize>
__global__ void ComputeTargetVarianceImpl(const float* stats,
ui32 size,
ui32 statCount,
ui64 statLineSize,
bool isMulticlass,
double* aggregatedStats) {
ui32 i = BlockSize * blockIdx.x + threadIdx.x;
float weightedSum = 0;
float weightedSum2 = 0;
float totalWeight = 0;
while (i < size) {
const float w = stats[i];
if (w > 1e-15f) {
float statSum = 0;
for (ui32 statId = 1; statId < statCount; ++statId) {
const float wt = stats[i + statLineSize * statId];
weightedSum += wt;
weightedSum2 += wt * wt / w; //cause we need sum w * t * t
statSum += wt;
}
if (isMulticlass) {
weightedSum += -statSum;
weightedSum2 += statSum * statSum / w;
}
totalWeight += w;
}
i += gridDim.x * BlockSize;
}
using BlockReduce = typename hipcub::BlockReduce<double, BlockSize>;
__shared__ typename BlockReduce::TempStorage tempStorage;
double blockWeightedSum = weightedSum;
blockWeightedSum = BlockReduce(tempStorage).Sum(blockWeightedSum);
double blockWeightedSum2 = weightedSum2;
blockWeightedSum2 = BlockReduce(tempStorage).Sum(blockWeightedSum2);
double blockTotalWeight = totalWeight;
blockTotalWeight = BlockReduce(tempStorage).Sum(blockTotalWeight);
if (threadIdx.x == 0) {
TAtomicAdd<double>::Add(aggregatedStats, blockWeightedSum);
TAtomicAdd<double>::Add(aggregatedStats + 1, blockWeightedSum2);
TAtomicAdd<double>::Add(aggregatedStats + 2, blockTotalWeight);
}
}
void ComputeTargetVariance(const float* stats,
ui32 size,
ui32 statCount,
ui64 statLineSize,
bool isMulticlass,
double* aggregatedStats,
TCudaStream stream) {
const ui32 blockSize = 512;
const ui32 numBlocks = min(4 * TArchProps::SMCount(), CeilDivide(size, blockSize));
FillBuffer(aggregatedStats, 0.0, 3, stream);
if (numBlocks) {
hipLaunchKernelGGL(( ComputeTargetVarianceImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, stats, size, statCount, statLineSize, isMulticlass, aggregatedStats);
}
}
template <int BlockSize,
class TScoreCalcer>
__global__ void ComputeOptimalSplitsRegion(const TCBinFeature* bf,
ui32 binFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds,
bool multiclassOptimization,
TScoreCalcer calcer,
TBestSplitProperties* result) {
float bestScore = FLT_MAX;
int bestIndex = -1;
int tid = threadIdx.x;
result += blockIdx.x + blockIdx.y * gridDim.x;
partIds += blockIdx.y;
const int thisPartId = partIds[0];
for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) {
const int binFeatureId = offset + tid;
if (binFeatureId >= binFeatureCount) {
break;
}
if (bf[binFeatureId].SkipInScoreCount) {
continue;
}
calcer.NextFeature(bf[binFeatureId]);
TScoreCalcer beforeSplitCalcer = calcer;
const double partWeight = __ldg(partStats + thisPartId * statCount);
const float weightLeft = max(__ldg(histograms + thisPartId * statCount * binFeatureCount + binFeatureId), 0.0f);
const float weightRight = max(partWeight - weightLeft, 0.0f);
bool toZeroPartSplit = false;
if (weightLeft < 1e-20f || weightRight < 1e-20f) {
toZeroPartSplit = true;
}
double totalSumLeft = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
float sumLeft = __ldg(histograms + thisPartId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId);
double partStat = __ldg(partStats + thisPartId * statCount + statId);
totalSumPart += partStat;
float sumRight = static_cast<float>(partStat - sumLeft);
calcer.AddLeaf(sumLeft, weightLeft);
calcer.AddLeaf(sumRight, weightRight);
beforeSplitCalcer.AddLeaf(partStat, partWeight);
totalSumLeft += sumLeft;
}
if (multiclassOptimization) {
double totalSumRight = totalSumPart - totalSumLeft;
calcer.AddLeaf(-totalSumLeft, weightLeft);
calcer.AddLeaf(-totalSumRight, weightRight);
beforeSplitCalcer.AddLeaf(-totalSumPart, partWeight);
}
const bool skip = toZeroPartSplit;
const float scoreAfter = !skip ? calcer.GetScore() : FLT_MAX;
const float scoreBefore = !skip ? beforeSplitCalcer.GetScore() : FLT_MAX;
//e.g. -10 - 0 = -10
//in GPU CatBoost all scores are inverted, lower is better
const float gain = !skip ? abs(scoreAfter - scoreBefore) * (scoreAfter < scoreBefore ? -1 : 1) : 0;
if (gain < bestScore) {
bestScore = gain;
bestIndex = binFeatureId;
}
}
ARGMAX()
}
template <int BlockSize,
class TScoreCalcer>
__global__ void ComputeOptimalSplit(const TCBinFeature* bf,
ui32 binFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const int partId,
const int maybeSecondPartId,
bool multiclassOptimization,
TScoreCalcer calcer,
TBestSplitProperties* result) {
float bestScore = FLT_MAX;
int bestIndex = -1;
int tid = threadIdx.x;
result += blockIdx.x + blockIdx.y * gridDim.x;
const int thisPartId = blockIdx.y == 0 ? partId : maybeSecondPartId;
for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) {
const int binFeatureId = offset + tid;
if (binFeatureId >= binFeatureCount) {
break;
}
if (bf[binFeatureId].SkipInScoreCount) {
continue;
}
calcer.NextFeature(bf[binFeatureId]);
TScoreCalcer beforeSplitCalcer = calcer;
const double partWeight = __ldg(partStats + thisPartId * statCount);
const float weightLeft = max(__ldg(histograms + thisPartId * statCount * binFeatureCount + binFeatureId), 0.0f);
const float weightRight = max(partWeight - weightLeft, 0.0f);
bool toZeroPartSplit = false;
if (weightLeft < 1e-20f || weightRight < 1e-20f) {
toZeroPartSplit = true;
}
double totalSumLeft = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
float sumLeft = __ldg(histograms + thisPartId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId);
double partStat = __ldg(partStats + thisPartId * statCount + statId);
totalSumPart += partStat;
float sumRight = static_cast<float>(partStat - sumLeft);
calcer.AddLeaf(sumLeft, weightLeft);
calcer.AddLeaf(sumRight, weightRight);
beforeSplitCalcer.AddLeaf(partStat, partWeight);
totalSumLeft += sumLeft;
}
if (multiclassOptimization) {
double totalSumRight = totalSumPart - totalSumLeft;
calcer.AddLeaf(-totalSumLeft, weightLeft);
calcer.AddLeaf(-totalSumRight, weightRight);
beforeSplitCalcer.AddLeaf(-totalSumPart, partWeight);
}
const bool skip = toZeroPartSplit;
const float scoreAfter = !skip ? calcer.GetScore() : FLT_MAX;
const float scoreBefore = !skip ? beforeSplitCalcer.GetScore() : FLT_MAX;
//e.g. -10 - 0 = -10
//in GPU CatBoost all scores are inverted, lower is better
const float gain = !skip ? abs(scoreAfter - scoreBefore) * (scoreAfter < scoreBefore ? -1 : 1) : 0;
if (gain < bestScore) {
bestScore = gain;
bestIndex = binFeatureId;
}
}
ARGMAX()
}
void ComputeOptimalSplitsRegion(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds, int partCount,
TBestSplitProperties* result, ui32 argmaxBlockCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = argmaxBlockCount;
numBlocks.y = partCount;
numBlocks.z = 1;
#define RUN() \
ComputeOptimalSplitsRegion<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partIds, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
void ComputeOptimalSplit(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
const float* histograms,
const double* partStats, int statCount,
ui32 partId, ui32 maybeSecondPartId,
TBestSplitProperties* result, ui32 argmaxBlockCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = argmaxBlockCount;
numBlocks.y = partId == maybeSecondPartId ? 1 : 2;
numBlocks.z = 1;
#define RUN() \
ComputeOptimalSplit<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partId, maybeSecondPartId, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
//seems like this'll be faster on CPU
template <class TScoreCalcer>
void ComputeTreeScoreImpl(const double* partStats, int statCount,
const ui32* allPartIds, int allPartCount,
bool multiclassOptimization,
TScoreCalcer calcer,
double* result) {
calcer.NextFeature(TCBinFeature({100500, 42}));
for (int i = 0; i < allPartCount; ++i) {
const int leafId = allPartIds[i];
const double weight = max(partStats[leafId * statCount], 0.0);
double totalSum = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
double sum = partStats[leafId * statCount + statId];
totalSumPart += sum;
calcer.AddLeaf(sum, weight);
totalSum += sum;
}
if (multiclassOptimization) {
calcer.AddLeaf(-totalSum, weight);
}
}
result[0] = calcer.GetScore();
}
void ComputeTreeScore(
const double* partStats,
int statCount,
const ui32* allPartIds,
int allPartCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
double* result,
TCudaStream) {
#define RUN() \
ComputeTreeScoreImpl(partStats, statCount, allPartIds, allPartCount, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
#undef ARGMAX
}
| ce120f82dc9312aeae2ce35ead31503abc548677.cu | #include "compute_scores.cuh"
#include <catboost/cuda/methods/kernel/score_calcers.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/random_gen.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <contrib/libs/cub/cub/block/block_reduce.cuh>
#include <cmath>
#include <exception>
#include <cfloat>
namespace NKernel {
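// ARGMAX: block-wide shared-memory reduction that keeps the smallest score (ties resolved
// towards the lower bin-feature index); thread 0 then writes the winning feature id, bin id
// and score to *result, or -1 / FLT_MAX when no valid bin feature was found.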
#define ARGMAX() \
__shared__ float scores[BlockSize]; \
scores[tid] = bestScore; \
__shared__ int indices[BlockSize]; \
indices[tid] = bestIndex; \
__syncthreads();\
for (ui32 s = BlockSize >> 1; s > 0; s >>= 1) { \
if (tid < s) { \
if (scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { \
scores[tid] = scores[tid + s]; \
indices[tid] = indices[tid + s]; \
}\
}\
__syncthreads();\
} \
if (!tid) { \
const int index = indices[0];\
if (index != -1 && index < binFeatureCount) { \
result->FeatureId = bf[index].FeatureId;\
result->BinId = bf[index].BinId;\
result->Score = scores[0];\
} else {\
result->FeatureId = -1;\
result->BinId = -1;\
result->Score = FLT_MAX;\
}\
}
// histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex;
template <int BlockSize,
class TScoreCalcer>
__global__ void ComputeOptimalSplits(const TCBinFeature* bf,
ui32 binFeatureCount,
const float* binFeaturesWeights, ui32 binFeaturesWeightsCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds, int pCount,
const ui32* restPartIds, int restPartCount,
bool multiclassOptimization,
TScoreCalcer calcer,
TBestSplitProperties* result) {
float bestScore = FLT_MAX;
int bestIndex = -1;
int tid = threadIdx.x;
result += blockIdx.x + blockIdx.y * gridDim.x;
partIds += blockIdx.y * pCount;
for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) {
const int binFeatureId = offset + tid;
if (binFeatureId >= binFeatureCount) {
break;
}
if (bf[binFeatureId].SkipInScoreCount) {
continue;
}
calcer.NextFeature(bf[binFeatureId]);
for (int i = 0; i < pCount; i++) {
const int leafId = __ldg(partIds + i);
const float weightLeft = max(__ldg(histograms + leafId * statCount * binFeatureCount + binFeatureId), 0.0f);
const float weightRight = max(__ldg(partStats + leafId * statCount) - weightLeft, 0.0f);
double totalSumLeft = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
float sumLeft = __ldg(histograms + leafId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId);
double partStat = __ldg(partStats + leafId * statCount + statId);
totalSumPart += partStat;
float sumRight = static_cast<float>(partStat - sumLeft);
calcer.AddLeaf(sumLeft, weightLeft);
calcer.AddLeaf(sumRight, weightRight);
totalSumLeft += sumLeft;
}
if (multiclassOptimization) {
double totalSumRight = totalSumPart - totalSumLeft;
calcer.AddLeaf(-totalSumLeft, weightLeft);
calcer.AddLeaf(-totalSumRight, weightRight);
}
}
//add fixed leaves
for (int i = 0; i < restPartCount; i++) {
const int leafId = __ldg(restPartIds + i);
const float weight = max(__ldg(partStats + leafId * statCount), 0.0f);
double totalSum = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
double sum = __ldg(partStats + leafId * statCount + statId);
totalSumPart += sum;
calcer.AddLeaf(sum, weight);
totalSum += sum;
}
if (multiclassOptimization) {
calcer.AddLeaf(-totalSum, weight);
}
}
float score = calcer.GetScore();
ui32 featureId = bf[binFeatureId].FeatureId;
score *= __ldg(binFeaturesWeights + featureId);
if (score < bestScore) {
bestScore = score;
bestIndex = binFeatureId;
}
}
ARGMAX()
}
void ComputeOptimalSplits(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
const float* binFeaturesWeights, ui32 binFeaturesWeightsCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds, int partBlockSize, int partBlockCount,
const ui32* restPartIds, int restPartCount,
TBestSplitProperties* result, ui32 argmaxBlockCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
TCudaStream stream) {
const int blockSize = 128;
dim3 numBlocks;
numBlocks.x = argmaxBlockCount;
numBlocks.y = partBlockCount;
numBlocks.z = 1;
#define RUN() \
ComputeOptimalSplits<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, binFeaturesWeights, binFeaturesWeightsCount, histograms, partStats, statCount, partIds, partBlockSize, restPartIds, restPartCount, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
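// Grid-stride pass over all documents accumulating sum(w * t) (weightedSum),
// sum(w * t^2) (weightedSum2, computed as wt * wt / w since the stored stats are w * t)
// and sum(w) (totalWeight); for multiclass the implicit last class (-statSum) is included.
// Per-block partial sums are reduced and atomically added into aggregatedStats[0..2].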
template <int BlockSize>
__global__ void ComputeTargetVarianceImpl(const float* stats,
ui32 size,
ui32 statCount,
ui64 statLineSize,
bool isMulticlass,
double* aggregatedStats) {
ui32 i = BlockSize * blockIdx.x + threadIdx.x;
float weightedSum = 0;
float weightedSum2 = 0;
float totalWeight = 0;
while (i < size) {
const float w = stats[i];
if (w > 1e-15f) {
float statSum = 0;
for (ui32 statId = 1; statId < statCount; ++statId) {
const float wt = stats[i + statLineSize * statId];
weightedSum += wt;
weightedSum2 += wt * wt / w; //because we need sum w * t * t
statSum += wt;
}
if (isMulticlass) {
weightedSum += -statSum;
weightedSum2 += statSum * statSum / w;
}
totalWeight += w;
}
i += gridDim.x * BlockSize;
}
using BlockReduce = typename cub::BlockReduce<double, BlockSize>;
__shared__ typename BlockReduce::TempStorage tempStorage;
double blockWeightedSum = weightedSum;
blockWeightedSum = BlockReduce(tempStorage).Sum(blockWeightedSum);
double blockWeightedSum2 = weightedSum2;
blockWeightedSum2 = BlockReduce(tempStorage).Sum(blockWeightedSum2);
double blockTotalWeight = totalWeight;
blockTotalWeight = BlockReduce(tempStorage).Sum(blockTotalWeight);
if (threadIdx.x == 0) {
TAtomicAdd<double>::Add(aggregatedStats, blockWeightedSum);
TAtomicAdd<double>::Add(aggregatedStats + 1, blockWeightedSum2);
TAtomicAdd<double>::Add(aggregatedStats + 2, blockTotalWeight);
}
}
void ComputeTargetVariance(const float* stats,
ui32 size,
ui32 statCount,
ui64 statLineSize,
bool isMulticlass,
double* aggregatedStats,
TCudaStream stream) {
const ui32 blockSize = 512;
const ui32 numBlocks = min(4 * TArchProps::SMCount(), CeilDivide(size, blockSize));
FillBuffer(aggregatedStats, 0.0, 3, stream);
if (numBlocks) {
ComputeTargetVarianceImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(stats, size, statCount, statLineSize, isMulticlass, aggregatedStats);
}
}
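// One block row (blockIdx.y) scores splits of the single leaf partIds[blockIdx.y]: the left
// part is read from the histogram, the right part is the partition total minus the left part.
// The tracked value is a signed gain, |scoreAfter - scoreBefore| taken negative when the split
// improves the score; lower is better here, so ARGMAX keeps the minimum gain and its bin feature.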
template <int BlockSize,
class TScoreCalcer>
__global__ void ComputeOptimalSplitsRegion(const TCBinFeature* bf,
ui32 binFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds,
bool multiclassOptimization,
TScoreCalcer calcer,
TBestSplitProperties* result) {
float bestScore = FLT_MAX;
int bestIndex = -1;
int tid = threadIdx.x;
result += blockIdx.x + blockIdx.y * gridDim.x;
partIds += blockIdx.y;
const int thisPartId = partIds[0];
for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) {
const int binFeatureId = offset + tid;
if (binFeatureId >= binFeatureCount) {
break;
}
if (bf[binFeatureId].SkipInScoreCount) {
continue;
}
calcer.NextFeature(bf[binFeatureId]);
TScoreCalcer beforeSplitCalcer = calcer;
const double partWeight = __ldg(partStats + thisPartId * statCount);
const float weightLeft = max(__ldg(histograms + thisPartId * statCount * binFeatureCount + binFeatureId), 0.0f);
const float weightRight = max(partWeight - weightLeft, 0.0f);
bool toZeroPartSplit = false;
if (weightLeft < 1e-20f || weightRight < 1e-20f) {
toZeroPartSplit = true;
}
double totalSumLeft = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
float sumLeft = __ldg(histograms + thisPartId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId);
double partStat = __ldg(partStats + thisPartId * statCount + statId);
totalSumPart += partStat;
float sumRight = static_cast<float>(partStat - sumLeft);
calcer.AddLeaf(sumLeft, weightLeft);
calcer.AddLeaf(sumRight, weightRight);
beforeSplitCalcer.AddLeaf(partStat, partWeight);
totalSumLeft += sumLeft;
}
if (multiclassOptimization) {
double totalSumRight = totalSumPart - totalSumLeft;
calcer.AddLeaf(-totalSumLeft, weightLeft);
calcer.AddLeaf(-totalSumRight, weightRight);
beforeSplitCalcer.AddLeaf(-totalSumPart, partWeight);
}
const bool skip = toZeroPartSplit;
const float scoreAfter = !skip ? calcer.GetScore() : FLT_MAX;
const float scoreBefore = !skip ? beforeSplitCalcer.GetScore() : FLT_MAX;
//e.g. -10 - 0 = -10
//in GPU CatBoost all scores are inverted, lower is better
const float gain = !skip ? abs(scoreAfter - scoreBefore) * (scoreAfter < scoreBefore ? -1 : 1) : 0;
if (gain < bestScore) {
bestScore = gain;
bestIndex = binFeatureId;
}
}
ARGMAX()
}
template <int BlockSize,
class TScoreCalcer>
__global__ void ComputeOptimalSplit(const TCBinFeature* bf,
ui32 binFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const int partId,
const int maybeSecondPartId,
bool multiclassOptimization,
TScoreCalcer calcer,
TBestSplitProperties* result) {
float bestScore = FLT_MAX;
int bestIndex = -1;
int tid = threadIdx.x;
result += blockIdx.x + blockIdx.y * gridDim.x;
const int thisPartId = blockIdx.y == 0 ? partId : maybeSecondPartId;
for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) {
const int binFeatureId = offset + tid;
if (binFeatureId >= binFeatureCount) {
break;
}
if (bf[binFeatureId].SkipInScoreCount) {
continue;
}
calcer.NextFeature(bf[binFeatureId]);
TScoreCalcer beforeSplitCalcer = calcer;
const double partWeight = __ldg(partStats + thisPartId * statCount);
const float weightLeft = max(__ldg(histograms + thisPartId * statCount * binFeatureCount + binFeatureId), 0.0f);
const float weightRight = max(partWeight - weightLeft, 0.0f);
bool toZeroPartSplit = false;
if (weightLeft < 1e-20f || weightRight < 1e-20f) {
toZeroPartSplit = true;
}
double totalSumLeft = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
float sumLeft = __ldg(histograms + thisPartId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId);
double partStat = __ldg(partStats + thisPartId * statCount + statId);
totalSumPart += partStat;
float sumRight = static_cast<float>(partStat - sumLeft);
calcer.AddLeaf(sumLeft, weightLeft);
calcer.AddLeaf(sumRight, weightRight);
beforeSplitCalcer.AddLeaf(partStat, partWeight);
totalSumLeft += sumLeft;
}
if (multiclassOptimization) {
double totalSumRight = totalSumPart - totalSumLeft;
calcer.AddLeaf(-totalSumLeft, weightLeft);
calcer.AddLeaf(-totalSumRight, weightRight);
beforeSplitCalcer.AddLeaf(-totalSumPart, partWeight);
}
const bool skip = toZeroPartSplit;
const float scoreAfter = !skip ? calcer.GetScore() : FLT_MAX;
const float scoreBefore = !skip ? beforeSplitCalcer.GetScore() : FLT_MAX;
//e.g. -10 - 0 = -10
//in GPU CatBoost all scores are inverted, lower is better
const float gain = !skip ? abs(scoreAfter - scoreBefore) * (scoreAfter < scoreBefore ? -1 : 1) : 0;
if (gain < bestScore) {
bestScore = gain;
bestIndex = binFeatureId;
}
}
ARGMAX()
}
void ComputeOptimalSplitsRegion(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds, int partCount,
TBestSplitProperties* result, ui32 argmaxBlockCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = argmaxBlockCount;
numBlocks.y = partCount;
numBlocks.z = 1;
#define RUN() \
ComputeOptimalSplitsRegion<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partIds, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
void ComputeOptimalSplit(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
const float* histograms,
const double* partStats, int statCount,
ui32 partId, ui32 maybeSecondPartId,
TBestSplitProperties* result, ui32 argmaxBlockCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = argmaxBlockCount;
numBlocks.y = partId == maybeSecondPartId ? 1 : 2;
numBlocks.z = 1;
#define RUN() \
ComputeOptimalSplit<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partId, maybeSecondPartId, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
//seems like this'll be faster on CPU
template <class TScoreCalcer>
void ComputeTreeScoreImpl(const double* partStats, int statCount,
const ui32* allPartIds, int allPartCount,
bool multiclassOptimization,
TScoreCalcer calcer,
double* result) {
calcer.NextFeature(TCBinFeature({100500, 42}));
for (int i = 0; i < allPartCount; ++i) {
const int leafId = allPartIds[i];
const double weight = max(partStats[leafId * statCount], 0.0);
double totalSum = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
double sum = partStats[leafId * statCount + statId];
totalSumPart += sum;
calcer.AddLeaf(sum, weight);
totalSum += sum;
}
if (multiclassOptimization) {
calcer.AddLeaf(-totalSum, weight);
}
}
result[0] = calcer.GetScore();
}
void ComputeTreeScore(
const double* partStats,
int statCount,
const ui32* allPartIds,
int allPartCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
double* result,
TCudaStream) {
#define RUN() \
ComputeTreeScoreImpl(partStats, statCount, allPartIds, allPartCount, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
#undef ARGMAX
}
|
0066466e746b4871a475d12467005f577b9f9485.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void __set_lval(long long *A, long long val, int length) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < length; i += blockDim.x * gridDim.x * gridDim.y) {
A[i] = val;
}
} | 0066466e746b4871a475d12467005f577b9f9485.cu | #include "includes.h"
__global__ void __set_lval(long long *A, long long val, int length) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < length; i += blockDim.x * gridDim.x * gridDim.y) {
A[i] = val;
}
} |
c8d67236145e1502e499b8f6909b799c3d1c6df8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/opencv.hpp>
#include <vector>
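// Tiled 3x3 stencil: blocks advance by blockDim-2 in x and y so tiles overlap by a one-pixel
// halo; every in-range thread stages its RGB pixel in shared memory and only the interior
// threads of the tile write an output pixel.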
__global__ void embossShared ( unsigned char * data, unsigned char * out, std::size_t w, std::size_t h) {
auto i = blockIdx.x * (blockDim.x-2) + threadIdx.x;
auto j = blockIdx.y * (blockDim.y-2) + threadIdx.y;
auto li = threadIdx.x;
auto lj = threadIdx.y;
extern __shared__ unsigned char sh[];
if( i < w && j < h ) {
// stage the red, green and blue components in shared memory
sh[3 * (lj * blockDim.x + li) ] = data[ 3 * ( j * w + i ) ];
sh[3 * (lj * blockDim.x + li) + 1 ] = data[ 3 * ( j * w + i ) + 1];
sh[3 * (lj * blockDim.x + li) + 2 ] = data[ 3 * ( j * w + i ) + 2 ];
__syncthreads();
auto ww = blockDim.x;
if( li > 0 && li < (blockDim.x - 1) && lj > 0 && lj < (blockDim.y - 1) )
{
for (auto c = 0; c < 3; ++c){
auto gu = sh[((lj - 1) * ww + li - 1) * 3 + c] * -18 + sh[((lj - 1) * ww + li + 1) * 3 + c] * 0
+ sh[( lj * ww + li - 1) * 3 + c] * -9 + sh[( lj * ww + li + 1) * 3 + c] * 9
+ sh[((lj + 1) * ww + li - 1) * 3 + c] * 0 + sh[((lj + 1) * ww + li + 1) * 3 + c] * 18
+ sh[(( lj - 1) * ww + li) * 3 + c] * -9 + 9 * sh[( lj * ww + li) * 3 + c]
+ sh[(( lj + 1) * ww + li) * 3 + c] * 9;
out[(j * w + i) * 3 + c] = (gu / 9);
}
}
}
}
int main()
{
cv::Mat m_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED );
auto rgb = m_in.data;
auto rows = m_in.rows;
auto cols = m_in.cols;
std::vector< unsigned char > g( 3 * rows * cols );
cv::Mat m_out( rows, cols, CV_8UC3, g.data() );
unsigned char * rgb_d;
unsigned char * out;
hipMalloc( &rgb_d, 3 * rows * cols);
hipMalloc( &out, 3 * rows * cols );
hipMemcpy( rgb_d, rgb, 3 * rows * cols, hipMemcpyHostToDevice );
dim3 t( 32, 32 );
dim3 bu( 3 * (( cols - 1) / (t.x-2) + 1) , ( rows - 1 ) / (t.y-2) + 1 );
// dim3 t( 16, 16 );
// dim3 bu( 3 * 2 * (( cols - 1) / (t.x-2) + 1) , (2 * rows - 1 ) / (t.y-2) + 1 );
// dim3 t( 4, 4 );
// dim3 bu( 3 * 8 *(( cols - 1) / (t.x-2) + 1) , (8 * rows - 1 ) / (t.y-2) + 1 );
hipEvent_t start, stop;
hipEventCreate( &start );
hipEventCreate( &stop );
hipEventRecord( start );
hipLaunchKernelGGL(( embossShared), dim3(bu), dim3(t), 3*t.x*t.y , 0, rgb_d, out, cols, rows );
hipMemcpy(g.data(), out, 3 * rows * cols, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
auto hipError_t = hipGetLastError();
// If no error was detected we will get hipSuccess
if (hipError_t != hipSuccess){
std::cout << hipGetErrorName(hipError_t) << std::endl;
std::cout << hipGetErrorString(hipError_t) << std::endl;
}
else {
std::cout << "Aucune erreur" << std::endl;
}
hipEventRecord( stop );
hipEventSynchronize( stop );
float duration = 0.0f;
hipEventElapsedTime( &duration, start, stop );
std::cout << "Total: " << duration << "ms\n";
cv::imwrite( "outEmbossShared.jpg", m_out );
hipFree( rgb_d);
hipFree ( out);
return 0;
}
| c8d67236145e1502e499b8f6909b799c3d1c6df8.cu | #include <opencv2/opencv.hpp>
#include <vector>
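// Tiled 3x3 stencil: blocks advance by blockDim-2 in x and y so tiles overlap by a one-pixel
// halo; every in-range thread stages its RGB pixel in shared memory and only the interior
// threads of the tile write an output pixel.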
__global__ void embossShared ( unsigned char * data, unsigned char * out, std::size_t w, std::size_t h) {
auto i = blockIdx.x * (blockDim.x-2) + threadIdx.x;
auto j = blockIdx.y * (blockDim.y-2) + threadIdx.y;
auto li = threadIdx.x;
auto lj = threadIdx.y;
extern __shared__ unsigned char sh[];
if( i < w && j < h ) {
// stage the red, green and blue components in shared memory
sh[3 * (lj * blockDim.x + li) ] = data[ 3 * ( j * w + i ) ];
sh[3 * (lj * blockDim.x + li) + 1 ] = data[ 3 * ( j * w + i ) + 1];
sh[3 * (lj * blockDim.x + li) + 2 ] = data[ 3 * ( j * w + i ) + 2 ];
__syncthreads();
auto ww = blockDim.x;
if( li > 0 && li < (blockDim.x - 1) && lj > 0 && lj < (blockDim.y - 1) )
{
for (auto c = 0; c < 3; ++c){
auto gu = sh[((lj - 1) * ww + li - 1) * 3 + c] * -18 + sh[((lj - 1) * ww + li + 1) * 3 + c] * 0
+ sh[( lj * ww + li - 1) * 3 + c] * -9 + sh[( lj * ww + li + 1) * 3 + c] * 9
+ sh[((lj + 1) * ww + li - 1) * 3 + c] * 0 + sh[((lj + 1) * ww + li + 1) * 3 + c] * 18
+ sh[(( lj - 1) * ww + li) * 3 + c] * -9 + 9 * sh[( lj * ww + li) * 3 + c]
+ sh[(( lj + 1) * ww + li) * 3 + c] * 9;
out[(j * w + i) * 3 + c] = (gu / 9);
}
}
}
}
int main()
{
cv::Mat m_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED );
auto rgb = m_in.data;
auto rows = m_in.rows;
auto cols = m_in.cols;
std::vector< unsigned char > g( 3 * rows * cols );
cv::Mat m_out( rows, cols, CV_8UC3, g.data() );
unsigned char * rgb_d;
unsigned char * out;
cudaMalloc( &rgb_d, 3 * rows * cols);
cudaMalloc( &out, 3 * rows * cols );
cudaMemcpy( rgb_d, rgb, 3 * rows * cols, cudaMemcpyHostToDevice );
dim3 t( 32, 32 );
dim3 bu( 3 * (( cols - 1) / (t.x-2) + 1) , ( rows - 1 ) / (t.y-2) + 1 );
// dim3 t( 16, 16 );
// dim3 bu( 3 * 2 * (( cols - 1) / (t.x-2) + 1) , (2 * rows - 1 ) / (t.y-2) + 1 );
// dim3 t( 4, 4 );
// dim3 bu( 3 * 8 *(( cols - 1) / (t.x-2) + 1) , (8 * rows - 1 ) / (t.y-2) + 1 );
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start );
embossShared<<< bu, t, 3*t.x*t.y >>>( rgb_d, out, cols, rows );
cudaMemcpy(g.data(), out, 3 * rows * cols, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
auto cudaError = cudaGetLastError();
// If no error was detected we will get cudaSuccess
if (cudaError != cudaSuccess){
std::cout << cudaGetErrorName(cudaError) << std::endl;
std::cout << cudaGetErrorString(cudaError) << std::endl;
}
else {
std::cout << "Aucune erreur" << std::endl;
}
cudaEventRecord( stop );
cudaEventSynchronize( stop );
float duration = 0.0f;
cudaEventElapsedTime( &duration, start, stop );
std::cout << "Total: " << duration << "ms\n";
cv::imwrite( "outEmbossShared.jpg", m_out );
cudaFree( rgb_d);
cudaFree ( out);
return 0;
}
|
b5f38ba9fca25adce02b9dc39133fe8950a93dff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __DEVICE_EMULATION__
#define EMUSYNC __syncthreads();
#else
#define EMUSYNC
#endif
#include "support_kernels.cu"
//Reduce function to get the minimum timestep
__device__ void get_TnextD(const int n_bodies,
double2 *time,
double *tnext, volatile double *sdata) {
//float2 time : x is time begin, y is time end
// perform first level of reduction,
// reading from global memory, writing to shared memory
const int blockSize = blockDim.x;
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid] = 1.0e10f;
double tmin = 1.0e10f;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridSize). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n_bodies) {
if (i < n_bodies) tmin = fmin(tmin, time[i ].y);
if (i + blockSize < n_bodies) tmin = fmin(tmin, time[i + blockSize].y);
i += gridSize;
}
sdata[tid] = tmin;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) { if (tid < 256) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 256]); } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 128]); } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 64]); } __syncthreads(); }
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
if (blockSize >= 64) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 32]); EMUSYNC; }
if (blockSize >= 32) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 16]); EMUSYNC; }
if (blockSize >= 16) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 8]); EMUSYNC; }
if (blockSize >= 8) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 4]); EMUSYNC; }
if (blockSize >= 4) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 2]); EMUSYNC; }
if (blockSize >= 2) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 1]); EMUSYNC; }
}
// write result for this block to global mem
if (tid == 0) tnext[blockIdx.x] = sdata[0];
}
extern "C" __global__ void get_Tnext(const int n_bodies,
double2 *time,
double *tnext) {
extern __shared__ double sdata[];
get_TnextD(n_bodies, time, tnext, sdata);
}
//Reduce function to get the number of active particles
__device__ void get_nactiveD(const int n_bodies,
uint *valid,
uint *tnact, volatile int *sdataInt) {
// perform first level of reduction,
// reading from global memory, writing to shared memory
const int blockSize = blockDim.x;
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
sdataInt[tid] = 0;
int sum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridSize). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n_bodies) {
if (i < n_bodies) sum = sum + valid[i ];
if (i + blockSize < n_bodies) sum = sum + valid[i + blockSize];
i += gridSize;
}
sdataInt[tid] = sum;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) { if (tid < 256) { sdataInt[tid] = sum = sum + sdataInt[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdataInt[tid] = sum = sum + sdataInt[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdataInt[tid] = sum = sum + sdataInt[tid + 64]; } __syncthreads(); }
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
if (blockSize >= 64) { sdataInt[tid] = sum = sum + sdataInt[tid + 32]; EMUSYNC; }
if (blockSize >= 32) { sdataInt[tid] = sum = sum + sdataInt[tid + 16]; EMUSYNC; }
if (blockSize >= 16) { sdataInt[tid] = sum = sum + sdataInt[tid + 8]; EMUSYNC; }
if (blockSize >= 8) { sdataInt[tid] = sum = sum + sdataInt[tid + 4]; EMUSYNC; }
if (blockSize >= 4) { sdataInt[tid] = sum = sum + sdataInt[tid + 2]; EMUSYNC; }
if (blockSize >= 2) { sdataInt[tid] = sum = sum + sdataInt[tid + 1]; EMUSYNC; }
}
// write result for this block to global mem
if (tid == 0) tnact[blockIdx.x] = sdataInt[0];
}
//Reduce function to get the number of active particles
extern "C" __global__ void get_nactive(const int n_bodies,
uint *valid,
uint *tnact) {
extern __shared__ int sdataInt[];
get_nactiveD(n_bodies, valid, tnact, sdataInt);
}
#if 0
extern "C" __global__ void predict_particles(const int n_bodies,
float tc,
float tp,
real4 *pos,
real4 *vel,
real4 *acc,
float2 *time,
uint *body2grouplist,
uint *valid_list){
const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
const uint tid = threadIdx.x;
const uint idx = bid * blockDim.x + tid;
if (idx >= n_bodies) return;
float4 p = pos [idx];
float4 v = vel [idx];
float4 a = acc [idx];
float tb = time[idx].x;
float te = time[idx].y;
float dt_cb = tc - tb;
float dt_pb = tp - tb;
v.x -= a.x*dt_pb;
v.y -= a.y*dt_pb;
v.z -= a.z*dt_pb;
p.x -= (v.x*dt_pb + a.x*dt_pb*dt_pb*0.5f);
p.y -= (v.y*dt_pb + a.y*dt_pb*dt_pb*0.5f);
p.z -= (v.z*dt_pb + a.z*dt_pb*dt_pb*0.5f);
p.x += (v.x*dt_cb + a.x*dt_cb*dt_cb*0.5f);
p.y += (v.y*dt_cb + a.y*dt_cb*dt_cb*0.5f);
p.z += (v.z*dt_cb + a.z*dt_cb*dt_cb*0.5f);
v.x += a.x*dt_cb;
v.y += a.y*dt_cb;
v.z += a.z*dt_cb;
pos[idx] = p;
vel[idx] = v;
//Set the group to active if the current time equals this particle's
//end time. Multiple particles may write to the same location,
//but the net result is the same
int grpID = body2grouplist[idx];
if(tc == te)
{
valid_list[grpID] = grpID | (1 << 31);
}
}
#endif
#if 1
extern "C" __global__ void predict_particles(const int n_bodies,
double tc,
double tp,
real4 *pos,
real4 *vel,
real4 *acc,
double2 *time,
uint *body2grouplist,
uint *valid_list,
real4 *pPos,
real4 *pVel){
const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
const uint tid = threadIdx.x;
const uint idx = bid * blockDim.x + tid;
if (idx >= n_bodies) return;
float4 p = pos [idx];
float4 v = vel [idx];
float4 a = acc [idx];
double tb = time[idx].x;
double te = time[idx].y;
float dt_cb = (float)(tc - tb);
// float dt_pb = tp - tb;
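// Predictor (drift) step: advance position and velocity from this particle's block-step
// start time tb to the shared current time tc, writing only the predicted pPos/pVel.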
p.x += v.x*dt_cb + a.x*dt_cb*dt_cb*0.5f;
p.y += v.y*dt_cb + a.y*dt_cb*dt_cb*0.5f;
p.z += v.z*dt_cb + a.z*dt_cb*dt_cb*0.5f;
v.x += a.x*dt_cb;
v.y += a.y*dt_cb;
v.z += a.z*dt_cb;
pPos[idx] = p;
pVel[idx] = v;
//Set the group to active if the current time equals this particle's
//end time. Multiple particles may write to the same location,
//but the net result is the same
int grpID = body2grouplist[idx];
if(tc == te)
{
valid_list[grpID] = grpID | (1 << 31);
}
}
#endif
extern "C" __global__ void setActiveGroups(const int n_bodies,
double tc,
double2 *time,
uint *body2grouplist,
uint *valid_list){
const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
const uint tid = threadIdx.x;
const uint idx = bid * blockDim.x + tid;
if (idx >= n_bodies) return;
double te = time[idx].y;
//Set the group to active if the current time equals this particle's
//end time. Multiple particles may write to the same location,
//but the net result is the same
int grpID = body2grouplist[idx];
//Test not only the particle with the current time, but any particle
//with a time difference of less than 1/16k
/* if(te-tc <= (1./16384))
{
valid_list[grpID] = grpID | (1 << 31);
}
*/
if(te <= tc)
{
valid_list[grpID] = grpID | (1 << 31);
}
}
#ifdef _AMUSE_STOPPING_CONDITIONS_
extern "C" __global__ void correct_particles(const int n_bodies,
double tc,
double2 *time,
uint *active_list,
real4 *vel,
real4 *acc0,
real4 *acc1,
real4 *pos,
real4 *pPos,
real4 *pVel,
int *ngb,
int *pairDetection) {
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int dim = blockDim.x * blockDim.y;
int idx = bid * dim + tid;
if (idx >= n_bodies) return;
//Check if particle is set to active during approx grav
if (active_list[idx] != 1) return;
float4 v = vel [idx];
float4 a0 = acc0[idx];
float4 a1 = acc1[idx];
double tb = time[idx].x;
// float dt = time[idx].y;
float dt_cb = (float)(tc - tb);
//Store the predicted position as the one to use
pos[idx] = pPos[idx];
//Correct the position
v = pVel[idx];
dt_cb *= 0.5f;
v.x += (a1.x - a0.x)*dt_cb;
v.y += (a1.y - a0.y)*dt_cb;
v.z += (a1.z - a0.z)*dt_cb;
//Store the corrected velocity, acceleration and the new time step info
vel [idx] = v;
acc0[idx] = a1;
//Code specific to stopping conditions
int j = ngb[idx];
#if 1
if(j >= 0) //Only check if we have a valid nearby neighbour
{
float4 posi = pPos[idx];
float4 posj = pPos[j];
float radj = vel[j].w; //Particle radius is stored in w component of velocity
float radi = v.w;
//Compute distance and compare to summed radius
float ds2 = ((posi.x-posj.x)*(posi.x-posj.x)) +
((posi.y-posj.y)*(posi.y-posj.y)) +
((posi.z-posj.z)*(posi.z-posj.z));
float rsum = radi + radj;
if (ds2 <= rsum*rsum)
{
float4 veli = pVel[idx];
float4 velj = pVel[j];
//Compute distance and compare to summed radius
float r = ((posi.x-posj.x)*(posi.x-posj.x)) +
((posi.y-posj.y)*(posi.y-posj.y)) +
((posi.z-posj.z)*(posi.z-posj.z));
float v = ((veli.x-velj.x)*(veli.x-velj.x)) +
((veli.y-velj.y)*(veli.y-velj.y)) +
((veli.z-velj.z)*(veli.z-velj.z));
float vr = ((posi.x-posj.x)*(veli.x-velj.x)) +
((posi.y-posj.y)*(veli.y-velj.y)) +
((posi.z-posj.z)*(veli.z-velj.z));
//TODO: remove these expensive sqrt operations; instead just
//compare vr*vr and EPS*EPS
r = sqrt(r);
v = sqrt(v);
#define EPS 0.001 // see couple/multiples.py
// if (abs(vr) < EPS*r*v)
if(1) //JB: 9 Sept 13. Disabled until we figure out why tests fail
{
//Collision detected, store the indices of the involved particles
//Note that this will create duplicate entries in the final list
//if j is the nearest neighbour of i and i the nearest neighbour of j
pairDetection[2*idx+0] = idx | (1 << 31);
pairDetection[2*idx+1] = j | (1 << 31);
//Another option is to store it like this, but this destroys the
//info about pairs
}
}//if ds2 <=
}//if j >= 0
#endif
}
#else
extern "C" __global__ void correct_particles(const int n_bodies,
double tc,
double2 *time,
uint *active_list,
real4 *vel,
real4 *acc0,
real4 *acc1,
real4 *pos,
real4 *pPos,
real4 *pVel) {
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int dim = blockDim.x * blockDim.y;
int idx = bid * dim + tid;
if (idx >= n_bodies) return;
//Check if particle is set to active during approx grav
if (active_list[idx] != 1) return;
float4 v = vel [idx];
float4 a0 = acc0[idx];
float4 a1 = acc1[idx];
float tb = time[idx].x;
// float dt = time[idx].y;
float dt_cb = (float)(tc - tb);
//Store the predicted position as the one to use
pos[idx] = pPos[idx];
//Correct the position
v = pVel[idx];
dt_cb *= 0.5f;
v.x += (a1.x - a0.x)*dt_cb;
v.y += (a1.y - a0.y)*dt_cb;
v.z += (a1.z - a0.z)*dt_cb;
//Store the corrected velocity, acceleration and the new time step info
vel [idx] = v;
acc0[idx] = a1;
// time[idx] = (float2){tc, tc + dt};
}
#endif
#if 0
extern "C" __global__ void correct_particles(const int n_bodies,
float tc,
float2 *time,
uint *active_list,
real4 *vel,
real4 *acc0,
real4 *acc1) {
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int dim = blockDim.x * blockDim.y;
int idx = bid * dim + tid;
if (idx >= n_bodies) return;
//Check if particle is set to active during approx grav
if (active_list[idx] != 1) return;
float4 v = vel [idx];
float4 a0 = acc0[idx];
float4 a1 = acc1[idx];
float tb = time[idx].x;
float dt_cb = tc - tb;
v.x -= a0.x * dt_cb;
v.y -= a0.y * dt_cb;
v.z -= a0.z * dt_cb;
dt_cb *= 0.5f;
v.x += (a0.x + a1.x)*dt_cb;
v.y += (a0.y + a1.y)*dt_cb;
v.z += (a0.z + a1.z)*dt_cb;
vel [idx] = v;
acc0[idx] = a1;
}
#endif
extern "C" __global__ void compute_dt(const int n_bodies,
double tc,
float eta,
int dt_limit,
float eps2,
double2 *time,
real4 *vel,
int *ngb,
real4 *bodies_pos,
real4 *bodies_acc,
uint *active_list,
float timeStep){
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int dim = blockDim.x * blockDim.y;
int idx = bid * dim + tid;
if (idx >= n_bodies) return;
//Check if particle is set to active during approx grav
if (active_list[idx] != 1) return;
int j = ngb[idx];
float4 ri, rj;
float4 vi, vj;
float4 ai, aj;
float ds2, mi, mj;
ri = bodies_pos[idx];
mi = ri.w;
vi = vel[idx];
ai = bodies_acc[idx];
int j1, j2;
if (j >= 0) {
rj = bodies_pos[j];
float3 dr = {ri.x - rj.x,
ri.y - rj.y,
ri.z - rj.z};
ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
} else {
j1 = max(0, idx - 1);
rj = bodies_pos[j1];
mj = rj.w;
float3 dr = {ri.x - rj.x,
ri.y - rj.y,
ri.z - rj.z};
if (idx != j1) ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
else ds2 = 1.0e10f;
j2 = min(n_bodies-1, idx + 1);
rj = bodies_pos[j2];
dr = (float3){ri.x - rj.x,
ri.y - rj.y,
ri.z - rj.z};
if (idx != j2) {
if (dr.x*dr.x + dr.y*dr.y + dr.z*dr.z < ds2) {
ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
j = j2;
mj = rj.w;
} else {
j = j1;
};
} else {
j = j1;
}
}
//Add softening to the distance between the chosen particles
ds2 += eps2;
vj = vel[j];
aj = bodies_acc[j];
const float3 vda = make_float3(ai.x - aj.x,
ai.y - aj.y,
ai.z - aj.z);
const float3 vdv = make_float3(vi.x - vj.x,
vi.y - vj.y,
vi.z - vj.z);
const float vs2 = vdv.x*vdv.x + vdv.y*vdv.y + vdv.z*vdv.z;
//Compute the minimum crossing time
const float mct = (ds2*ds2) / (vs2*vs2);
//Free fall time
float da2 = vda.x*vda.x + vda.y*vda.y + vda.z*vda.z;
float mij = mi + mj; //Sum masses
da2 *= (mij*mij);
const float fft = (ds2 / da2);
//Time step is minimum of the free fall time and minimum crossing time
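//(after the fourth root below this equals dt_est = min( d/|dv| , sqrt( d/((mi+mj)*|da|) ) ),
// with d = sqrt(ds2) the softened distance to the selected neighbour)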
float dt_est = sqrt(sqrt(min(mct, fft)));
//Make it a power of 2
float dt_param = eta; //eta
// float dt_param = 1.0; //eta
float dt = dt_est*dt_param;
int power = -(int)__log2f(dt) + 1;
power = max(power, dt_limit);
int count = 0;
dt = 1.0f/(1 << power);
while(fmodf(tc, dt) != 0.0f)
{
dt *= 0.5f; // could be slow!
count++;
if(count > 30)
{
dt = timeStep;
break;
}
}
//if(dt < 1./16384) dt = 1./16384;
//if(dt < 1./1048576) dt = 1./1048576;
time[idx].x = tc;
#ifdef ADAPTIVE_TIMESTEP
//Prevent a time-step smaller than specified through the interface
if(dt < timeStep)
dt = timeStep;
time[idx].y = tc + (double)dt;
#else
time[idx].y = tc + timeStep;
#endif
// if(idx % 1000 == 0)
// time[idx].y = tc + 1./2048 ;
// else
// time[idx].y = tc + timeStep;
#if 0
ds2 = ds2*__powf(10.0f, 0.666667f) + eps2;
// ds2 += eps2;
vj = vel[j];
aj = bodies_acc[j];
float3 vda = {ai.x - aj.x,
ai.y - aj.y,
ai.z - aj.z};
float3 vdv = {vi.x - vj.x,
vi.y - vj.y,
vi.z - vj.z};
float da = sqrtf(vda.x*vda.x + vda.y*vda.y + vda.z*vda.z);
float dv = sqrtf(vdv.x*vdv.x + vdv.y*vdv.y + vdv.z*vdv.z);
float ds = sqrtf(ds2);
float dt = eta * dv/da*(sqrt(2*da*ds/(dv*dv) + 1) - 1);
int power = -(int)__log2f(dt) + 1;
power = max(power, dt_limit);
dt = 1.0f/(1 << power);
while(fmodf(tc, dt) != 0.0f) dt *= 0.5f; // could be slow!
// dt = 0.015625;
dt = 1.0f/(1 << 8);
dt = 1.0f/(1 << 6);
dt = 1.0f/(1 << 7);
dt = timeStep;
time[idx].x = tc;
//time[idx].y = tc + dt;
time[idx].y = tc + dt;
#endif
}
//Reduce function to get the energy of the system in single precision
__device__ void compute_energyD(const int n_bodies,
real4 *pos,
real4 *vel,
real4 *acc,
float2 *energy, volatile float *shDataKin) {
// perform first level of reduction,
// reading from global memory, writing to shared memory
const int blockSize = blockDim.x;
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
volatile float *shDataPot = (float*)&shDataKin [blockSize];
float eKin, ePot;
shDataKin[tid] = eKin = 0; //Stores Ekin
shDataPot[tid] = ePot = 0; //Stores Epot
real4 temp;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridSize). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n_bodies) {
if (i < n_bodies)
{
//Ekin
temp = vel[i];
eKin += pos[i].w*0.5*(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);
//Epot
ePot += pos[i].w*0.5*acc[i].w;
}
if (i + blockSize < n_bodies)
{
temp = vel[i + blockSize];
eKin += pos[i + blockSize].w*0.5*(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);
//Epot
ePot += pos[i + blockSize].w*0.5*acc[i + blockSize].w;
}
i += gridSize;
}
shDataKin[tid] = eKin;
shDataPot[tid] = ePot;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) { if (tid < 256) {
shDataPot[tid] = ePot = ePot + shDataPot[tid + 256];
shDataKin[tid] = eKin = eKin + shDataKin[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) {
shDataPot[tid] = ePot = ePot + shDataPot[tid + 128];
shDataKin[tid] = eKin = eKin + shDataKin[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) {
shDataPot[tid] = ePot = ePot + shDataPot[tid + 64];
shDataKin[tid] = eKin = eKin + shDataKin[tid + 64]; } __syncthreads(); }
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
if (blockSize >= 64) {shDataKin[tid] = eKin = eKin + shDataKin[tid + 32]; shDataPot[tid] = ePot = ePot + shDataPot[tid + 32]; EMUSYNC; }
if (blockSize >= 32) {shDataKin[tid] = eKin = eKin + shDataKin[tid + 16]; shDataPot[tid] = ePot = ePot + shDataPot[tid + 16]; EMUSYNC; }
if (blockSize >= 16) {shDataKin[tid] = eKin = eKin + shDataKin[tid + 8]; shDataPot[tid] = ePot = ePot + shDataPot[tid + 8]; EMUSYNC; }
if (blockSize >= 8) {shDataKin[tid] = eKin = eKin + shDataKin[tid + 4]; shDataPot[tid] = ePot = ePot + shDataPot[tid + 4]; EMUSYNC; }
if (blockSize >= 4) {shDataKin[tid] = eKin = eKin + shDataKin[tid + 2]; shDataPot[tid] = ePot = ePot + shDataPot[tid + 2]; EMUSYNC; }
if (blockSize >= 2) {shDataKin[tid] = eKin = eKin + shDataKin[tid + 1]; shDataPot[tid] = ePot = ePot + shDataPot[tid + 1]; EMUSYNC; }
}
// write result for this block to global mem
if (tid == 0) energy[blockIdx.x] = (float2){shDataKin[0], shDataPot[0] };
}
extern "C" __global__ void compute_energy(const int n_bodies,
real4 *pos,
real4 *vel,
real4 *acc,
float2 *energy) {
extern __shared__ float shDataKin[];
compute_energyD(n_bodies, pos, vel, acc, energy,shDataKin);
}
//Reduce function to get the energy of the system in double precision
__device__ void compute_energy_doubleD(const int n_bodies,
real4 *pos,
real4 *vel,
real4 *acc,
double2 *energy, volatile double *shDDataKin) {
// perform first level of reduction,
// reading from global memory, writing to shared memory
const int blockSize = blockDim.x;
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
volatile double *shDDataPot = (double*)&shDDataKin [blockSize];
double eKin, ePot;
shDDataKin[tid] = eKin = 0; //Stores Ekin
shDDataPot[tid] = ePot = 0; //Stores Epot
real4 temp;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridSize). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n_bodies) {
if (i < n_bodies)
{
//Ekin
temp = vel[i];
eKin += pos[i].w*0.5*(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);
//Epot
ePot += pos[i].w*0.5*acc[i].w;
}
if (i + blockSize < n_bodies)
{
temp = vel[i + blockSize];
eKin += pos[i + blockSize].w*0.5*(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);
//Epot
ePot += pos[i + blockSize].w*0.5*acc[i + blockSize].w;
}
i += gridSize;
}
shDDataKin[tid] = eKin;
shDDataPot[tid] = ePot;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) { if (tid < 256) {
shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 256];
shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) {
shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 128];
shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) {
shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 64];
shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 64]; } __syncthreads(); }
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
if (blockSize >= 64) {shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 32]; shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 32]; EMUSYNC; }
if (blockSize >= 32) {shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 16]; shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 16]; EMUSYNC; }
if (blockSize >= 16) {shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 8]; shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 8]; EMUSYNC; }
if (blockSize >= 8) {shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 4]; shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 4]; EMUSYNC; }
if (blockSize >= 4) {shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 2]; shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 2]; EMUSYNC; }
if (blockSize >= 2) {shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 1]; shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 1]; EMUSYNC; }
}
// write result for this block to global mem
if (tid == 0) energy[blockIdx.x] = (double2){shDDataKin[0], shDDataPot[0] };
}
//Reduce function to get the energy of the system
extern "C" __global__ void compute_energy_double(const int n_bodies,
real4 *pos,
real4 *vel,
real4 *acc,
double2 *energy) {
extern __shared__ double shDDataKin[];
compute_energy_doubleD(n_bodies, pos, vel, acc, energy, shDDataKin);
}
extern "C" __global__ void distanceCheck(const int n_bodies,
real4 *pos,
int *ids,
real4 *out,
const int numberOfBH,
real4 *vel)
{
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int dim = blockDim.x * blockDim.y;
int idx = bid * dim + tid;
if (idx >= n_bodies) return;
int partID = ids[idx];
if(partID < numberOfBH)
{
real4 curPos = pos[idx];
//curPos.w = partID;
out[partID*2+0] = curPos;
out[partID*2+1] = vel[idx];
}
}
| b5f38ba9fca25adce02b9dc39133fe8950a93dff.cu | #ifdef __DEVICE_EMULATION__
#define EMUSYNC __syncthreads();
#else
#define EMUSYNC
#endif
#include "support_kernels.cu"
//Reduce function to get the minimum timestep
__device__ void get_TnextD(const int n_bodies,
double2 *time,
double *tnext, volatile double *sdata) {
//float2 time : x is time begin, y is time end
// perform first level of reduction,
// reading from global memory, writing to shared memory
const int blockSize = blockDim.x;
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid] = 1.0e10f;
double tmin = 1.0e10f;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridSize). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n_bodies) {
if (i < n_bodies) tmin = fmin(tmin, time[i ].y);
if (i + blockSize < n_bodies) tmin = fmin(tmin, time[i + blockSize].y);
i += gridSize;
}
sdata[tid] = tmin;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) { if (tid < 256) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 256]); } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 128]); } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 64]); } __syncthreads(); }
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
if (blockSize >= 64) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 32]); EMUSYNC; }
if (blockSize >= 32) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 16]); EMUSYNC; }
if (blockSize >= 16) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 8]); EMUSYNC; }
if (blockSize >= 8) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 4]); EMUSYNC; }
if (blockSize >= 4) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 2]); EMUSYNC; }
if (blockSize >= 2) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 1]); EMUSYNC; }
}
// write result for this block to global mem
if (tid == 0) tnext[blockIdx.x] = sdata[0];
}
extern "C" __global__ void get_Tnext(const int n_bodies,
double2 *time,
double *tnext) {
extern __shared__ double sdata[];
get_TnextD(n_bodies, time, tnext, sdata);
}
//Reduce function to get the number of active particles
__device__ void get_nactiveD(const int n_bodies,
uint *valid,
uint *tnact, volatile int *sdataInt) {
// perform first level of reduction,
// reading from global memory, writing to shared memory
const int blockSize = blockDim.x;
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
sdataInt[tid] = 0;
int sum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridSize). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n_bodies) {
if (i < n_bodies) sum = sum + valid[i ];
if (i + blockSize < n_bodies) sum = sum + valid[i + blockSize];
i += gridSize;
}
sdataInt[tid] = sum;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) { if (tid < 256) { sdataInt[tid] = sum = sum + sdataInt[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdataInt[tid] = sum = sum + sdataInt[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdataInt[tid] = sum = sum + sdataInt[tid + 64]; } __syncthreads(); }
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
if (blockSize >= 64) { sdataInt[tid] = sum = sum + sdataInt[tid + 32]; EMUSYNC; }
if (blockSize >= 32) { sdataInt[tid] = sum = sum + sdataInt[tid + 16]; EMUSYNC; }
if (blockSize >= 16) { sdataInt[tid] = sum = sum + sdataInt[tid + 8]; EMUSYNC; }
if (blockSize >= 8) { sdataInt[tid] = sum = sum + sdataInt[tid + 4]; EMUSYNC; }
if (blockSize >= 4) { sdataInt[tid] = sum = sum + sdataInt[tid + 2]; EMUSYNC; }
if (blockSize >= 2) { sdataInt[tid] = sum = sum + sdataInt[tid + 1]; EMUSYNC; }
}
// write result for this block to global mem
if (tid == 0) tnact[blockIdx.x] = sdataInt[0];
}
//Reduce function to get the number of active particles
extern "C" __global__ void get_nactive(const int n_bodies,
uint *valid,
uint *tnact) {
extern __shared__ int sdataInt[];
get_nactiveD(n_bodies, valid, tnact, sdataInt);
}
#if 0
extern "C" __global__ void predict_particles(const int n_bodies,
float tc,
float tp,
real4 *pos,
real4 *vel,
real4 *acc,
float2 *time,
uint *body2grouplist,
uint *valid_list){
const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
const uint tid = threadIdx.x;
const uint idx = bid * blockDim.x + tid;
if (idx >= n_bodies) return;
float4 p = pos [idx];
float4 v = vel [idx];
float4 a = acc [idx];
float tb = time[idx].x;
float te = time[idx].y;
float dt_cb = tc - tb;
float dt_pb = tp - tb;
v.x -= a.x*dt_pb;
v.y -= a.y*dt_pb;
v.z -= a.z*dt_pb;
p.x -= (v.x*dt_pb + a.x*dt_pb*dt_pb*0.5f);
p.y -= (v.y*dt_pb + a.y*dt_pb*dt_pb*0.5f);
p.z -= (v.z*dt_pb + a.z*dt_pb*dt_pb*0.5f);
p.x += (v.x*dt_cb + a.x*dt_cb*dt_cb*0.5f);
p.y += (v.y*dt_cb + a.y*dt_cb*dt_cb*0.5f);
p.z += (v.z*dt_cb + a.z*dt_cb*dt_cb*0.5f);
v.x += a.x*dt_cb;
v.y += a.y*dt_cb;
v.z += a.z*dt_cb;
pos[idx] = p;
vel[idx] = v;
  //Set the group to active if the current time equals this particle's end time.
  //Multiple particles may write to the same location, but the net result is
  //the same
int grpID = body2grouplist[idx];
if(tc == te)
{
valid_list[grpID] = grpID | (1 << 31);
}
}
#endif
#if 1
extern "C" __global__ void predict_particles(const int n_bodies,
double tc,
double tp,
real4 *pos,
real4 *vel,
real4 *acc,
double2 *time,
uint *body2grouplist,
uint *valid_list,
real4 *pPos,
real4 *pVel){
const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
const uint tid = threadIdx.x;
const uint idx = bid * blockDim.x + tid;
if (idx >= n_bodies) return;
float4 p = pos [idx];
float4 v = vel [idx];
float4 a = acc [idx];
double tb = time[idx].x;
double te = time[idx].y;
float dt_cb = (float)(tc - tb);
// float dt_pb = tp - tb;
p.x += v.x*dt_cb + a.x*dt_cb*dt_cb*0.5f;
p.y += v.y*dt_cb + a.y*dt_cb*dt_cb*0.5f;
p.z += v.z*dt_cb + a.z*dt_cb*dt_cb*0.5f;
v.x += a.x*dt_cb;
v.y += a.y*dt_cb;
v.z += a.z*dt_cb;
pPos[idx] = p;
pVel[idx] = v;
  //Set the group to active if the current time equals this particle's end time.
  //Multiple particles may write to the same location, but the net result is
  //the same
int grpID = body2grouplist[idx];
if(tc == te)
{
valid_list[grpID] = grpID | (1 << 31);
}
}
#endif
extern "C" __global__ void setActiveGroups(const int n_bodies,
double tc,
double2 *time,
uint *body2grouplist,
uint *valid_list){
const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
const uint tid = threadIdx.x;
const uint idx = bid * blockDim.x + tid;
if (idx >= n_bodies) return;
double te = time[idx].y;
  //Set the group to active if the current time equals this particle's end time.
  //Multiple particles may write to the same location, but the net result is
  //the same
int grpID = body2grouplist[idx];
  //Test not only the particle with the current time, but any particle
  //with time diff less than 1/16k
/* if(te-tc <= (1./16384))
{
valid_list[grpID] = grpID | (1 << 31);
}
*/
if(te <= tc)
{
valid_list[grpID] = grpID | (1 << 31);
}
}
#ifdef _AMUSE_STOPPING_CONDITIONS_
extern "C" __global__ void correct_particles(const int n_bodies,
double tc,
double2 *time,
uint *active_list,
real4 *vel,
real4 *acc0,
real4 *acc1,
real4 *pos,
real4 *pPos,
real4 *pVel,
int *ngb,
int *pairDetection) {
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int dim = blockDim.x * blockDim.y;
int idx = bid * dim + tid;
if (idx >= n_bodies) return;
//Check if particle is set to active during approx grav
if (active_list[idx] != 1) return;
float4 v = vel [idx];
float4 a0 = acc0[idx];
float4 a1 = acc1[idx];
double tb = time[idx].x;
// float dt = time[idx].y;
float dt_cb = (float)(tc - tb);
//Store the predicted position as the one to use
pos[idx] = pPos[idx];
//Correct the position
v = pVel[idx];
dt_cb *= 0.5f;
v.x += (a1.x - a0.x)*dt_cb;
v.y += (a1.y - a0.y)*dt_cb;
v.z += (a1.z - a0.z)*dt_cb;
  //Store the corrected velocity, acceleration and the new time step info
vel [idx] = v;
acc0[idx] = a1;
//Code specific to stopping conditions
int j = ngb[idx];
#if 1
if(j >= 0) //Only check if we have a valid nearby neighbour
{
float4 posi = pPos[idx];
float4 posj = pPos[j];
float radj = vel[j].w; //Particle radius is stored in w component of velocity
float radi = v.w;
//Compute distance and compare to summed radius
float ds2 = ((posi.x-posj.x)*(posi.x-posj.x)) +
((posi.y-posj.y)*(posi.y-posj.y)) +
((posi.z-posj.z)*(posi.z-posj.z));
float rsum = radi + radj;
if (ds2 <= rsum*rsum)
{
float4 veli = pVel[idx];
float4 velj = pVel[j];
//Compute distance and compare to summed radius
float r = ((posi.x-posj.x)*(posi.x-posj.x)) +
((posi.y-posj.y)*(posi.y-posj.y)) +
((posi.z-posj.z)*(posi.z-posj.z));
float v = ((veli.x-velj.x)*(veli.x-velj.x)) +
((veli.y-velj.y)*(veli.y-velj.y)) +
((veli.z-velj.z)*(veli.z-velj.z));
float vr = ((posi.x-posj.x)*(veli.x-velj.x)) +
((posi.y-posj.y)*(veli.y-velj.y)) +
((posi.z-posj.z)*(veli.z-velj.z));
              //TODO: remove these expensive sqrt operations; instead just
              //work with vr*vr and EPS*EPS
r = sqrt(r);
v = sqrt(v);
#define EPS 0.001 // see couple/multiples.py
// if (abs(vr) < EPS*r*v)
if(1) //JB: 9 sept 13 . Disabled untill we figure out why tests fail
{
//Collision detected, store the indices of the involved particles
//Note that this will create double items in the final list
//if j is nearest neighbour of i and i nearest neighbour of j
pairDetection[2*idx+0] = idx | (1 << 31);
pairDetection[2*idx+1] = j | (1 << 31);
//Another option is to store it like this, but this destroys the
//info about pairs
}
}//if ds2 <=
}//if j >= 0
#endif
}
#else
extern "C" __global__ void correct_particles(const int n_bodies,
double tc,
double2 *time,
uint *active_list,
real4 *vel,
real4 *acc0,
real4 *acc1,
real4 *pos,
real4 *pPos,
real4 *pVel) {
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int dim = blockDim.x * blockDim.y;
int idx = bid * dim + tid;
if (idx >= n_bodies) return;
//Check if particle is set to active during approx grav
if (active_list[idx] != 1) return;
float4 v = vel [idx];
float4 a0 = acc0[idx];
float4 a1 = acc1[idx];
float tb = time[idx].x;
// float dt = time[idx].y;
float dt_cb = (float)(tc - tb);
//Store the predicted position as the one to use
pos[idx] = pPos[idx];
//Correct the position
v = pVel[idx];
dt_cb *= 0.5f;
v.x += (a1.x - a0.x)*dt_cb;
v.y += (a1.y - a0.y)*dt_cb;
v.z += (a1.z - a0.z)*dt_cb;
  //Store the corrected velocity, acceleration and the new time step info
vel [idx] = v;
acc0[idx] = a1;
// time[idx] = (float2){tc, tc + dt};
}
#endif
#if 0
extern "C" __global__ void correct_particles(const int n_bodies,
float tc,
float2 *time,
uint *active_list,
real4 *vel,
real4 *acc0,
real4 *acc1) {
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int dim = blockDim.x * blockDim.y;
int idx = bid * dim + tid;
if (idx >= n_bodies) return;
//Check if particle is set to active during approx grav
if (active_list[idx] != 1) return;
float4 v = vel [idx];
float4 a0 = acc0[idx];
float4 a1 = acc1[idx];
float tb = time[idx].x;
float dt_cb = tc - tb;
v.x -= a0.x * dt_cb;
v.y -= a0.y * dt_cb;
v.z -= a0.z * dt_cb;
dt_cb *= 0.5f;
v.x += (a0.x + a1.x)*dt_cb;
v.y += (a0.y + a1.y)*dt_cb;
v.z += (a0.z + a1.z)*dt_cb;
vel [idx] = v;
acc0[idx] = a1;
}
#endif
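//Compute a new power-of-two time step for each active particle from the
//minimum of its nearest-neighbour crossing time and free-fall time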
extern "C" __global__ void compute_dt(const int n_bodies,
double tc,
float eta,
int dt_limit,
float eps2,
double2 *time,
real4 *vel,
int *ngb,
real4 *bodies_pos,
real4 *bodies_acc,
uint *active_list,
float timeStep){
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int dim = blockDim.x * blockDim.y;
int idx = bid * dim + tid;
if (idx >= n_bodies) return;
//Check if particle is set to active during approx grav
if (active_list[idx] != 1) return;
int j = ngb[idx];
float4 ri, rj;
float4 vi, vj;
float4 ai, aj;
float ds2, mi, mj;
ri = bodies_pos[idx];
mi = ri.w;
vi = vel[idx];
ai = bodies_acc[idx];
int j1, j2;
if (j >= 0) {
rj = bodies_pos[j];
float3 dr = {ri.x - rj.x,
ri.y - rj.y,
ri.z - rj.z};
ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
} else {
j1 = max(0, idx - 1);
rj = bodies_pos[j1];
mj = rj.w;
float3 dr = {ri.x - rj.x,
ri.y - rj.y,
ri.z - rj.z};
if (idx != j1) ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
else ds2 = 1.0e10f;
j2 = min(n_bodies-1, idx + 1);
rj = bodies_pos[j2];
dr = (float3){ri.x - rj.x,
ri.y - rj.y,
ri.z - rj.z};
if (idx != j2) {
if (dr.x*dr.x + dr.y*dr.y + dr.z*dr.z < ds2) {
ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
j = j2;
mj = rj.w;
} else {
j = j1;
};
} else {
j = j1;
}
}
//Add softening to the distance between the chosen particles
ds2 += eps2;
vj = vel[j];
aj = bodies_acc[j];
const float3 vda = make_float3(ai.x - aj.x,
ai.y - aj.y,
ai.z - aj.z);
const float3 vdv = make_float3(vi.x - vj.x,
vi.y - vj.y,
vi.z - vj.z);
const float vs2 = vdv.x*vdv.x + vdv.y*vdv.y + vdv.z*vdv.z;
//Compute the minimum crossing time
const float mct = (ds2*ds2) / (vs2*vs2);
//Free fall time
float da2 = vda.x*vda.x + vda.y*vda.y + vda.z*vda.z;
float mij = mi + mj; //Sum masses
da2 *= (mij*mij);
const float fft = (ds2 / da2);
//Time step is minimum of the free fall time and minimum crossing time
float dt_est = sqrt(sqrt(min(mct, fft)));
//Make it a power of 2
float dt_param = eta; //eta
// float dt_param = 1.0; //eta
float dt = dt_est*dt_param;
int power = -(int)__log2f(dt) + 1;
power = max(power, dt_limit);
int count = 0;
dt = 1.0f/(1 << power);
while(fmodf(tc, dt) != 0.0f)
{
dt *= 0.5f; // could be slow!
count++;
if(count > 30)
{
dt = timeStep;
break;
}
}
//if(dt < 1./16384) dt = 1./16384;
//if(dt < 1./1048576) dt = 1./1048576;
time[idx].x = tc;
#ifdef ADAPTIVE_TIMESTEP
//Prevent a time-step smaller than specified through the interface
if(dt < timeStep)
dt = timeStep;
time[idx].y = tc + (double)dt;
#else
time[idx].y = tc + timeStep;
#endif
// if(idx % 1000 == 0)
// time[idx].y = tc + 1./2048 ;
// else
// time[idx].y = tc + timeStep;
#if 0
ds2 = ds2*__powf(10.0f, 0.666667f) + eps2;
// ds2 += eps2;
vj = vel[j];
aj = bodies_acc[j];
float3 vda = {ai.x - aj.x,
ai.y - aj.y,
ai.z - aj.z};
float3 vdv = {vi.x - vj.x,
vi.y - vj.y,
vi.z - vj.z};
float da = sqrtf(vda.x*vda.x + vda.y*vda.y + vda.z*vda.z);
float dv = sqrtf(vdv.x*vdv.x + vdv.y*vdv.y + vdv.z*vdv.z);
float ds = sqrtf(ds2);
float dt = eta * dv/da*(sqrt(2*da*ds/(dv*dv) + 1) - 1);
int power = -(int)__log2f(dt) + 1;
power = max(power, dt_limit);
dt = 1.0f/(1 << power);
while(fmodf(tc, dt) != 0.0f) dt *= 0.5f; // could be slow!
// dt = 0.015625;
dt = 1.0f/(1 << 8);
dt = 1.0f/(1 << 6);
dt = 1.0f/(1 << 7);
dt = timeStep;
time[idx].x = tc;
//time[idx].y = tc + dt;
time[idx].y = tc + dt;
#endif
}
//Reduce function to get the energy of the system in single precision
__device__ void compute_energyD(const int n_bodies,
real4 *pos,
real4 *vel,
real4 *acc,
float2 *energy, volatile float *shDataKin) {
// perform first level of reduction,
// reading from global memory, writing to shared memory
const int blockSize = blockDim.x;
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
volatile float *shDataPot = (float*)&shDataKin [blockSize];
float eKin, ePot;
shDataKin[tid] = eKin = 0; //Stores Ekin
shDataPot[tid] = ePot = 0; //Stores Epot
real4 temp;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridSize). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n_bodies) {
if (i < n_bodies)
{
//Ekin
temp = vel[i];
eKin += pos[i].w*0.5*(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);
//Epot
ePot += pos[i].w*0.5*acc[i].w;
}
if (i + blockSize < n_bodies)
{
temp = vel[i + blockSize];
eKin += pos[i + blockSize].w*0.5*(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);
//Epot
ePot += pos[i + blockSize].w*0.5*acc[i + blockSize].w;
}
i += gridSize;
}
shDataKin[tid] = eKin;
shDataPot[tid] = ePot;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) { if (tid < 256) {
shDataPot[tid] = ePot = ePot + shDataPot[tid + 256];
shDataKin[tid] = eKin = eKin + shDataKin[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) {
shDataPot[tid] = ePot = ePot + shDataPot[tid + 128];
shDataKin[tid] = eKin = eKin + shDataKin[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) {
shDataPot[tid] = ePot = ePot + shDataPot[tid + 64];
shDataKin[tid] = eKin = eKin + shDataKin[tid + 64]; } __syncthreads(); }
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
if (blockSize >= 64) {shDataKin[tid] = eKin = eKin + shDataKin[tid + 32]; shDataPot[tid] = ePot = ePot + shDataPot[tid + 32]; EMUSYNC; }
if (blockSize >= 32) {shDataKin[tid] = eKin = eKin + shDataKin[tid + 16]; shDataPot[tid] = ePot = ePot + shDataPot[tid + 16]; EMUSYNC; }
if (blockSize >= 16) {shDataKin[tid] = eKin = eKin + shDataKin[tid + 8]; shDataPot[tid] = ePot = ePot + shDataPot[tid + 8]; EMUSYNC; }
if (blockSize >= 8) {shDataKin[tid] = eKin = eKin + shDataKin[tid + 4]; shDataPot[tid] = ePot = ePot + shDataPot[tid + 4]; EMUSYNC; }
if (blockSize >= 4) {shDataKin[tid] = eKin = eKin + shDataKin[tid + 2]; shDataPot[tid] = ePot = ePot + shDataPot[tid + 2]; EMUSYNC; }
if (blockSize >= 2) {shDataKin[tid] = eKin = eKin + shDataKin[tid + 1]; shDataPot[tid] = ePot = ePot + shDataPot[tid + 1]; EMUSYNC; }
}
// write result for this block to global mem
if (tid == 0) energy[blockIdx.x] = (float2){shDataKin[0], shDataPot[0] };
}
extern "C" __global__ void compute_energy(const int n_bodies,
real4 *pos,
real4 *vel,
real4 *acc,
float2 *energy) {
extern __shared__ float shDataKin[];
compute_energyD(n_bodies, pos, vel, acc, energy,shDataKin);
}
//Reduce function to get the energy of the system in double precision
__device__ void compute_energy_doubleD(const int n_bodies,
real4 *pos,
real4 *vel,
real4 *acc,
double2 *energy, volatile double *shDDataKin) {
// perform first level of reduction,
// reading from global memory, writing to shared memory
const int blockSize = blockDim.x;
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
volatile double *shDDataPot = (double*)&shDDataKin [blockSize];
double eKin, ePot;
shDDataKin[tid] = eKin = 0; //Stores Ekin
shDDataPot[tid] = ePot = 0; //Stores Epot
real4 temp;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridSize). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n_bodies) {
if (i < n_bodies)
{
//Ekin
temp = vel[i];
eKin += pos[i].w*0.5*(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);
//Epot
ePot += pos[i].w*0.5*acc[i].w;
}
if (i + blockSize < n_bodies)
{
temp = vel[i + blockSize];
eKin += pos[i + blockSize].w*0.5*(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);
//Epot
ePot += pos[i + blockSize].w*0.5*acc[i + blockSize].w;
}
i += gridSize;
}
shDDataKin[tid] = eKin;
shDDataPot[tid] = ePot;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) { if (tid < 256) {
shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 256];
shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) {
shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 128];
shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) {
shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 64];
shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 64]; } __syncthreads(); }
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
if (blockSize >= 64) {shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 32]; shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 32]; EMUSYNC; }
if (blockSize >= 32) {shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 16]; shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 16]; EMUSYNC; }
if (blockSize >= 16) {shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 8]; shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 8]; EMUSYNC; }
if (blockSize >= 8) {shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 4]; shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 4]; EMUSYNC; }
if (blockSize >= 4) {shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 2]; shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 2]; EMUSYNC; }
if (blockSize >= 2) {shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 1]; shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 1]; EMUSYNC; }
}
// write result for this block to global mem
if (tid == 0) energy[blockIdx.x] = (double2){shDDataKin[0], shDDataPot[0] };
}
//Reduce function to get the energy of the system
extern "C" __global__ void compute_energy_double(const int n_bodies,
real4 *pos,
real4 *vel,
real4 *acc,
double2 *energy) {
extern __shared__ double shDDataKin[];
compute_energy_doubleD(n_bodies, pos, vel, acc, energy, shDDataKin);
}
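//Copy the position and velocity of the black-hole particles (ids < numberOfBH)
//into a compact output buffer, two entries per particle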
extern "C" __global__ void distanceCheck(const int n_bodies,
real4 *pos,
int *ids,
real4 *out,
const int numberOfBH,
real4 *vel)
{
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int dim = blockDim.x * blockDim.y;
int idx = bid * dim + tid;
if (idx >= n_bodies) return;
int partID = ids[idx];
if(partID < numberOfBH)
{
real4 curPos = pos[idx];
//curPos.w = partID;
out[partID*2+0] = curPos;
out[partID*2+1] = vel[idx];
}
}
|
4eba61f096e37c7787e69b362a0ad552bbd02d72.hip | // !!! This is a file automatically generated by hipify!!!
#include "SteerForAlignment.h"
#include <hip/hip_runtime.h>
#include "OpenSteer/VehicleData.h"
#include "OpenSteer/NeighborDataProvider.h"
#include "CUDAKernelOptions.cu"
#include <iostream>
using namespace OpenSteer;
using namespace std;
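//Kernel forward declaration; the kernel body is defined elsewhere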
__global__ void
steerForAlignmentKernel(VehicleData *vehicleData, VehicleConst *vehicleConst, float3 *steeringVectors, float maxDistance, float cosMaxAngle, NeighborData* neighborData, float weight, kernel_options options);
OpenSteer::SteerForAlignment::SteerForAlignment(NeighborDataProvider*neighborDataProvider, float maxDistance, float cosMaxAngle, float weight, kernel_options options)
{
threadsPerBlock = 128;
this->weight = weight;
this->options = options;
this->neighborDataProvider = neighborDataProvider;
this->maxDistance = maxDistance;
this->cosMaxAngle = cosMaxAngle;
}
OpenSteer::SteerForAlignment::~SteerForAlignment() {}
void OpenSteer::SteerForAlignment::init()
{
// nothing to do
}
void OpenSteer::SteerForAlignment::run()
{
hipLaunchKernelGGL(( steerForAlignmentKernel), dim3(gridDim()), dim3(blockDim()), 0, 0, getVehicleData(), getVehicleConst(), getSteeringVectors(), maxDistance, cosMaxAngle, neighborDataProvider->getNeighborData(), weight, options);
}
void OpenSteer::SteerForAlignment::close()
{
// nothing to do
}
| 4eba61f096e37c7787e69b362a0ad552bbd02d72.cu | #include "SteerForAlignment.h"
#include <cuda_runtime.h>
#include "OpenSteer/VehicleData.h"
#include "OpenSteer/NeighborDataProvider.h"
#include "CUDAKernelOptions.cu"
#include <iostream>
using namespace OpenSteer;
using namespace std;
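//Kernel forward declaration; the kernel body is defined elsewhere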
__global__ void
steerForAlignmentKernel(VehicleData *vehicleData, VehicleConst *vehicleConst, float3 *steeringVectors, float maxDistance, float cosMaxAngle, NeighborData* neighborData, float weight, kernel_options options);
OpenSteer::SteerForAlignment::SteerForAlignment(NeighborDataProvider*neighborDataProvider, float maxDistance, float cosMaxAngle, float weight, kernel_options options)
{
threadsPerBlock = 128;
this->weight = weight;
this->options = options;
this->neighborDataProvider = neighborDataProvider;
this->maxDistance = maxDistance;
this->cosMaxAngle = cosMaxAngle;
}
OpenSteer::SteerForAlignment::~SteerForAlignment() {}
void OpenSteer::SteerForAlignment::init()
{
// nothing to do
}
void OpenSteer::SteerForAlignment::run()
{
steerForAlignmentKernel<<<gridDim(), blockDim()>>>(getVehicleData(), getVehicleConst(), getSteeringVectors(), maxDistance, cosMaxAngle, neighborDataProvider->getNeighborData(), weight, options);
}
void OpenSteer::SteerForAlignment::close()
{
// nothing to do
}
|
208f5d95da0f3f1b6f13d8998d43bd3afc7c97bc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "histogramm.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *hist = NULL;
hipMalloc(&hist, XSIZE*YSIZE);
unsigned char *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int stride = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
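//Grow the launch extents until they are a multiple of the block dimensions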
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((histogramm), dim3(gridBlock), dim3(threadBlock), 0, 0, hist, input, width, height, stride);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((histogramm), dim3(gridBlock), dim3(threadBlock), 0, 0, hist, input, width, height, stride);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((histogramm), dim3(gridBlock), dim3(threadBlock), 0, 0, hist, input, width, height, stride);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 208f5d95da0f3f1b6f13d8998d43bd3afc7c97bc.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "histogramm.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *hist = NULL;
cudaMalloc(&hist, XSIZE*YSIZE);
unsigned char *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int stride = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
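//Grow the launch extents until they are a multiple of the block dimensions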
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
histogramm<<<gridBlock,threadBlock>>>(hist,input,width,height,stride);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
histogramm<<<gridBlock,threadBlock>>>(hist,input,width,height,stride);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
histogramm<<<gridBlock,threadBlock>>>(hist,input,width,height,stride);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
fb2bf9353dc07b63e0c04e8ef87bd3127d2c0460.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <cusolverDn.h>
// Macro for timing kernel runs
#define START_METER {\
hipEvent_t start, stop;\
float elapsedTime;\
hipEventCreate(&start);\
hipEventRecord(start, 0);
#define STOP_METER hipEventCreate(&stop);\
hipEventRecord(stop, 0);\
hipEventSynchronize(stop);\
hipEventElapsedTime(&elapsedTime, start, stop);\
printf("Elapsed time : %f ms\n", elapsedTime);\
}
int main()
{
const int m = 64;
const int rows = m;
const int cols = m;
	/* Illustrative structure:
	 *     | 3.5 0.5 0 |
	 * A = | 0.5 3.5 0 |
	 *     | 0   0   2 |
	 * The loop below actually fills an m x m matrix with random values in [0,1]
	 * and adds 1 on the diagonal.
	 */
double A[rows*m];
for (int i = 0; i < cols; i++)
{
for (int j = 0; j < rows; j++)
{
A[i*rows + j] = (double)rand() / RAND_MAX;
if (i == j){
A[i*rows + j] += 1;
}
}
}
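	//Create the dense solver handle and query the workspace size required by gesvd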
hipsolverDnHandle_t handle;
hipsolverDnCreate(&handle);
int lwork;
hipsolverDnDgesvd_bufferSize(
handle,
rows,
cols,
&lwork);
double *d_A;
hipMalloc(&d_A, sizeof(double)*rows*cols);
hipMemcpy(d_A, A, sizeof(double)*rows*cols, hipMemcpyHostToDevice);
double *d_S;
hipMalloc(&d_S, sizeof(double)*rows);
double *d_U;
hipMalloc(&d_U, sizeof(double)*rows*rows);
double *d_VT;
hipMalloc(&d_VT, sizeof(double)*rows*rows);
double *d_work;
hipMalloc(&d_work, sizeof(double)*lwork);
double *d_rwork;
hipMalloc(&d_rwork, sizeof(double)*(rows - 1));
int *devInfo;
hipMalloc(&devInfo, sizeof(int));
signed char jobu = 'A';
signed char jobvt = 'A';
START_METER
hipsolverDnDgesvd(
handle,
jobu,
jobvt,
rows,
cols,
d_A,
rows,
d_S,
d_U,
rows,
d_VT,
rows,
d_work,
lwork,
d_rwork,
devInfo);
STOP_METER
hipFree(d_A);
hipFree(d_rwork);
hipFree(d_S);
hipFree(d_U);
hipFree(d_VT);
hipFree(d_work);
} | fb2bf9353dc07b63e0c04e8ef87bd3127d2c0460.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <cusolverDn.h>
// Macro for timing kernel runs
#define START_METER {\
cudaEvent_t start, stop;\
float elapsedTime;\
cudaEventCreate(&start);\
cudaEventRecord(start, 0);
#define STOP_METER cudaEventCreate(&stop);\
cudaEventRecord(stop, 0);\
cudaEventSynchronize(stop);\
cudaEventElapsedTime(&elapsedTime, start, stop);\
printf("Elapsed time : %f ms\n", elapsedTime);\
}
int main()
{
const int m = 64;
const int rows = m;
const int cols = m;
	/* Illustrative structure:
	 *     | 3.5 0.5 0 |
	 * A = | 0.5 3.5 0 |
	 *     | 0   0   2 |
	 * The loop below actually fills an m x m matrix with random values in [0,1]
	 * and adds 1 on the diagonal.
	 */
double A[rows*m];
for (int i = 0; i < cols; i++)
{
for (int j = 0; j < rows; j++)
{
A[i*rows + j] = (double)rand() / RAND_MAX;
if (i == j){
A[i*rows + j] += 1;
}
}
}
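	//Create the dense solver handle and query the workspace size required by gesvd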
cusolverDnHandle_t handle;
cusolverDnCreate(&handle);
int lwork;
cusolverDnDgesvd_bufferSize(
handle,
rows,
cols,
&lwork);
double *d_A;
cudaMalloc(&d_A, sizeof(double)*rows*cols);
cudaMemcpy(d_A, A, sizeof(double)*rows*cols, cudaMemcpyHostToDevice);
double *d_S;
cudaMalloc(&d_S, sizeof(double)*rows);
double *d_U;
cudaMalloc(&d_U, sizeof(double)*rows*rows);
double *d_VT;
cudaMalloc(&d_VT, sizeof(double)*rows*rows);
double *d_work;
cudaMalloc(&d_work, sizeof(double)*lwork);
double *d_rwork;
cudaMalloc(&d_rwork, sizeof(double)*(rows - 1));
int *devInfo;
cudaMalloc(&devInfo, sizeof(int));
signed char jobu = 'A';
signed char jobvt = 'A';
START_METER
cusolverDnDgesvd(
handle,
jobu,
jobvt,
rows,
cols,
d_A,
rows,
d_S,
d_U,
rows,
d_VT,
rows,
d_work,
lwork,
d_rwork,
devInfo);
STOP_METER
cudaFree(d_A);
cudaFree(d_rwork);
cudaFree(d_S);
cudaFree(d_U);
cudaFree(d_VT);
cudaFree(d_work);
} |
a388ffc6cfb81dd74bb31e8cfcb72b463199225a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define NUM_THREADS 8
#define NUM_BLOCKS 1
__host__ void generate_rand_data(unsigned int * data, unsigned int num_elements)
{
for(unsigned int i=0; i < num_elements; i++)
{
data[i] = rand() % 10;
}
}
__device__ void copy_data_to_shared(unsigned int * const data,
unsigned int * const shared_tmp,
const unsigned int tid)
{
// Copy data into shared memory
shared_tmp[tid] = data[tid];
__syncthreads();
}
__device__ void simple_squaring_operation(unsigned int * const data,
const unsigned int tid)
{
//square the mem value and overwrite
data[tid] = data[tid] * data[tid];
}
__global__ void gpu_register_array_operation(unsigned int * const data, const unsigned int num_elements)
{
const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
//perform some simple operation
simple_squaring_operation(data, tid);
}
__global__ void gpu_shared_array_operation(unsigned int * const data, const unsigned int num_elements)
{
const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
//allocate shared memory
__shared__ unsigned int shared_tmp[NUM_THREADS];
//make a copy of the global device data into the shared device memory
copy_data_to_shared(data, shared_tmp, tid);
//perform some simple operation
simple_squaring_operation(shared_tmp, tid);
//push updated shared mem back to the initial global data mem
data[tid] = shared_tmp[tid];
}
//tier 2 method for printing the properties of a specific cuda device
//already called by the tier 1 method
void print_all_device_properties(int device_id){
hipDeviceProp_t prop;
hipGetDeviceProperties( &prop, device_id);
// printf("============Start Device %x============\n", device_id);
// printf("Name: %s\n", prop.name);
// printf("Total global memory: %lu\n", prop.totalGlobalMem);
// printf("Total shared memory per block: %lu\n", prop.sharedMemPerBlock);
// printf("Total registers per block: %lu\n", (unsigned long)prop.regsPerBlock);
// printf("Warp size: %lu\n", (unsigned long)prop.warpSize);
// printf("Maximum memory pitch: %lu\n", prop.memPitch);
// printf("Maximum threads per block: %lu\n", (unsigned long)prop.maxThreadsPerBlock);
// for (int i = 0; i < 3; ++i)
// printf("Maximum dimension %d of block: %lu\n", i, (unsigned long)prop.maxThreadsDim[i]);
// for (int i = 0; i < 3; ++i)
// printf("Maximum dimension %d of grid: %lu\n", i, (unsigned long)prop.maxGridSize[i]);
// printf("Total constant memory: %lu\n", prop.totalConstMem);
// printf("Major revision number: %lu\n", (unsigned long)prop.major);
// printf("Minor revision number: %lu\n", (unsigned long)prop.minor);
// printf("Clock rate: %lu\n", (unsigned long)prop.clockRate);
// printf("Texture alignment: %lu\n", prop.textureAlignment);
// printf("Concurrent copy and execution: %s\n", (prop.deviceOverlap ? "Yes" : "No"));
// printf("Number of multiprocessors: %lu\n", (unsigned long)prop.multiProcessorCount);
// printf("Kernel execution timeout: %s\n", (prop.kernelExecTimeoutEnabled ? "Yes" : "No"));
// printf("Integrated: %s\n", (prop.integrated ? "Yes" : "No"));
// printf("Mapable Host Memory: %s\n", (prop.canMapHostMemory ? "Yes" : "No"));
// printf("Compute Mode: %d\n", prop.computeMode);
// printf("Concurrent Kernels: %d\n", prop.concurrentKernels);
// printf("ECC Enabled: %s\n", (prop.ECCEnabled ? "Yes" : "No"));
// printf("pci Bus ID: %lu\n", (unsigned long)prop.pciBusID);
// printf("pci Device ID: %lu\n", (unsigned long)prop.pciDeviceID);
// printf("Using a tcc Driver: %s\n", (prop.tccDriver ? "Yes" : "No"));
// printf("============End Device %x============\n", device_id);
printf("============Start Device %x============\n", device_id);
printf("Name: %s\n", prop.name);
printf("Total global memory: %lu\n", prop.totalGlobalMem);
printf("Total shared memory per block: %lu\n", prop.sharedMemPerBlock);
printf("Total registers per block: %d\n", prop.regsPerBlock);
printf("Warp size: %d\n", prop.warpSize);
printf("Maximum memory pitch: %lu\n", prop.memPitch);
printf("Maximum threads per block: %d\n", prop.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, prop.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, prop.maxGridSize[i]);
printf("Total constant memory: %lu\n", prop.totalConstMem);
printf("Major revision number: %d\n", prop.major);
printf("Minor revision number: %d\n", prop.minor);
printf("Clock rate: %d\n", prop.clockRate);
printf("Texture alignment: %lu\n", prop.textureAlignment);
printf("Concurrent copy and execution: %s\n", (prop.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", prop.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (prop.kernelExecTimeoutEnabled ? "Yes" : "No"));
printf("Integrated: %s\n", (prop.integrated ? "Yes" : "No"));
printf("Mapable Host Memory: %s\n", (prop.canMapHostMemory ? "Yes" : "No"));
printf("Compute Mode: %d\n", prop.computeMode);
printf("Concurrent Kernels: %d\n", prop.concurrentKernels);
printf("ECC Enabled: %s\n", (prop.ECCEnabled ? "Yes" : "No"));
printf("pci Bus ID: %d\n", prop.pciBusID);
printf("pci Device ID: %d\n", prop.pciDeviceID);
printf("Using a tcc Driver: %s\n", (prop.tccDriver ? "Yes" : "No"));
printf("============End Device %x============\n", device_id);
}
//tier 1 method for printing all cuda devices and their properties
void print_all_CUDA_devices_and_properties() {
int device_id;
hipGetDeviceCount( &device_id);
printf("Print of all CUDA devices and device properties\n");
for (int i = 0; i < device_id; i++){
		//hipGetDeviceProperties fills a hipDeviceProp_t struct with the device attributes
		print_all_device_properties(i);
}
}
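//Times a kernel that squares each element in place in global memory (no shared-memory staging)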
__host__ float execute_register_memory_operations(void)
{
const unsigned int num_elements = NUM_THREADS;
const unsigned int num_bytes = NUM_THREADS * sizeof(unsigned int);
unsigned int * d_data; //device data
unsigned int hi_data[num_elements]; //initial host data
unsigned int hf_data[num_elements]; //final host data
/* Set timing Metrics */
hipEvent_t kernel_start, kernel_stop;
float delta = 0.0F;
hipEventCreate(&kernel_start,0);
hipEventCreate(&kernel_stop,0);
//set CUDA stream
hipStream_t stream;
hipStreamCreate(&stream);
//start timing metric
hipEventRecord(kernel_start, 0);
//device memory alloc
hipMalloc(&d_data, num_bytes);
//populate the initial host array with random data
generate_rand_data(hi_data, num_elements);
//copy from host memory to device memory
hipMemcpy(d_data, hi_data, num_bytes, hipMemcpyHostToDevice);
//Call GPU kernel <<<BLOCK TOTAL, THREADS TOTAL>>>
hipLaunchKernelGGL(( gpu_register_array_operation), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, d_data, num_elements);
hipStreamSynchronize(stream); // Wait for the GPU launched work to complete
hipGetLastError();
//copy from device to host memory
hipMemcpy(hf_data, d_data, num_bytes, hipMemcpyDeviceToHost);
//end timing metric
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
hipEventElapsedTime(&delta, kernel_start, kernel_stop);
hipEventDestroy(kernel_start);
hipEventDestroy(kernel_stop);
	//console print the host data after the GPU kernel
for (int i = 0; i < num_elements; i++){
printf("Input value: %x, device output: %x\n", hi_data[i], hf_data[i]);
}
//free device and host memory allocations
	hipFree((void* ) d_data); 	//free device data
hipHostFree(hi_data); //free up the host memory
hipHostFree(hf_data); //free up the host memory
hipDeviceReset();
return delta;
}
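//Times a kernel that stages the data in shared memory, squares it there, and writes it back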
__host__ float execute_shared_memory_operations()
{
const unsigned int num_elements = NUM_THREADS;
const unsigned int num_bytes = NUM_THREADS * sizeof(unsigned int);
unsigned int * d_data; //device data
unsigned int hi_data[num_elements]; //initial host data
unsigned int hf_data[num_elements]; //final host data
/* Set timing Metrics */
hipEvent_t kernel_start, kernel_stop;
float delta = 0.0F;
hipEventCreate(&kernel_start,0);
hipEventCreate(&kernel_stop,0);
//set CUDA stream
hipStream_t stream;
hipStreamCreate(&stream);
//start timing metric
hipEventRecord(kernel_start, 0);
//device memory alloc
hipMalloc(&d_data, num_bytes);
//populate the initial host array with random data
generate_rand_data(hi_data, num_elements);
//copy from host memory to device memory
hipMemcpy(d_data, hi_data, num_bytes, hipMemcpyHostToDevice);
//Call GPU kernels <<<BLOCK TOTAL, THREADS TOTAL>>>
hipLaunchKernelGGL(( gpu_shared_array_operation), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, d_data, num_elements);
//sync the cuda stream
hipStreamSynchronize(stream); // Wait for the GPU launched work to complete
hipGetLastError(); //error handling
//copy from device to host memory
hipMemcpy(hf_data, d_data, num_bytes, hipMemcpyDeviceToHost);
//end timing metric
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
hipEventElapsedTime(&delta, kernel_start, kernel_stop);
hipEventDestroy(kernel_start);
hipEventDestroy(kernel_stop);
	//console print the host data after the GPU kernel
for (int i = 0; i < num_elements; i++){
printf("Input value: %x, device output: %x\n", hi_data[i], hf_data[i]);
}
//free device and host memory allocations
	hipFree((void* ) d_data); 	//free device data
hipHostFree(hi_data); //free up the host memory
hipHostFree(hf_data); //free up the host memory
hipDeviceReset();
return delta;
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(void) {
//print all cuda devices and device properties for kicks
print_all_CUDA_devices_and_properties();
//test harness for timing some kernels using streams and events
float delta_shared = execute_shared_memory_operations();
float delta_register = execute_register_memory_operations();
//print out the results of the time executions returned by the prev methods
printf("========================\n");
printf("Summary\n");
printf("Total Threads: %d\n", NUM_THREADS);
printf("Total Blocks: %d\n", NUM_BLOCKS);
printf("========================\n");
printf("Time to copy global to shared mem, perform simple operation w/ shared memory, copy memory back to global\n");
printf("duration: %fms\n",delta_shared);
printf("========================\n");
printf("Time to copy global to register mem, perform simple operation w/ register memory, copy memory back to global\n");
printf("duration: %fms\n",delta_register);
return 0;
}
| a388ffc6cfb81dd74bb31e8cfcb72b463199225a.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define NUM_THREADS 8
#define NUM_BLOCKS 1
__host__ void generate_rand_data(unsigned int * data, unsigned int num_elements)
{
for(unsigned int i=0; i < num_elements; i++)
{
data[i] = rand() % 10;
}
}
__device__ void copy_data_to_shared(unsigned int * const data,
unsigned int * const shared_tmp,
const unsigned int tid)
{
// Copy data into shared memory
shared_tmp[tid] = data[tid];
__syncthreads();
}
__device__ void simple_squaring_operation(unsigned int * const data,
const unsigned int tid)
{
//square the mem value and overwrite
data[tid] = data[tid] * data[tid];
}
__global__ void gpu_register_array_operation(unsigned int * const data, const unsigned int num_elements)
{
const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
//perform some simple operation
simple_squaring_operation(data, tid);
}
__global__ void gpu_shared_array_operation(unsigned int * const data, const unsigned int num_elements)
{
const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
//allocate shared memory
__shared__ unsigned int shared_tmp[NUM_THREADS];
//make a copy of the global device data into the shared device memory
copy_data_to_shared(data, shared_tmp, tid);
//perform some simple operation
simple_squaring_operation(shared_tmp, tid);
//push updated shared mem back to the initial global data mem
data[tid] = shared_tmp[tid];
}
//tier 2 method for printing the properties of a specific cuda device
//already called by the tier 1 method
void print_all_device_properties(int device_id){
cudaDeviceProp prop;
cudaGetDeviceProperties( &prop, device_id);
// printf("============Start Device %x============\n", device_id);
// printf("Name: %s\n", prop.name);
// printf("Total global memory: %lu\n", prop.totalGlobalMem);
// printf("Total shared memory per block: %lu\n", prop.sharedMemPerBlock);
// printf("Total registers per block: %lu\n", (unsigned long)prop.regsPerBlock);
// printf("Warp size: %lu\n", (unsigned long)prop.warpSize);
// printf("Maximum memory pitch: %lu\n", prop.memPitch);
// printf("Maximum threads per block: %lu\n", (unsigned long)prop.maxThreadsPerBlock);
// for (int i = 0; i < 3; ++i)
// printf("Maximum dimension %d of block: %lu\n", i, (unsigned long)prop.maxThreadsDim[i]);
// for (int i = 0; i < 3; ++i)
// printf("Maximum dimension %d of grid: %lu\n", i, (unsigned long)prop.maxGridSize[i]);
// printf("Total constant memory: %lu\n", prop.totalConstMem);
// printf("Major revision number: %lu\n", (unsigned long)prop.major);
// printf("Minor revision number: %lu\n", (unsigned long)prop.minor);
// printf("Clock rate: %lu\n", (unsigned long)prop.clockRate);
// printf("Texture alignment: %lu\n", prop.textureAlignment);
// printf("Concurrent copy and execution: %s\n", (prop.deviceOverlap ? "Yes" : "No"));
// printf("Number of multiprocessors: %lu\n", (unsigned long)prop.multiProcessorCount);
// printf("Kernel execution timeout: %s\n", (prop.kernelExecTimeoutEnabled ? "Yes" : "No"));
// printf("Integrated: %s\n", (prop.integrated ? "Yes" : "No"));
// printf("Mapable Host Memory: %s\n", (prop.canMapHostMemory ? "Yes" : "No"));
// printf("Compute Mode: %d\n", prop.computeMode);
// printf("Concurrent Kernels: %d\n", prop.concurrentKernels);
// printf("ECC Enabled: %s\n", (prop.ECCEnabled ? "Yes" : "No"));
// printf("pci Bus ID: %lu\n", (unsigned long)prop.pciBusID);
// printf("pci Device ID: %lu\n", (unsigned long)prop.pciDeviceID);
// printf("Using a tcc Driver: %s\n", (prop.tccDriver ? "Yes" : "No"));
// printf("============End Device %x============\n", device_id);
printf("============Start Device %x============\n", device_id);
printf("Name: %s\n", prop.name);
printf("Total global memory: %lu\n", prop.totalGlobalMem);
printf("Total shared memory per block: %lu\n", prop.sharedMemPerBlock);
printf("Total registers per block: %d\n", prop.regsPerBlock);
printf("Warp size: %d\n", prop.warpSize);
printf("Maximum memory pitch: %lu\n", prop.memPitch);
printf("Maximum threads per block: %d\n", prop.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, prop.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, prop.maxGridSize[i]);
printf("Total constant memory: %lu\n", prop.totalConstMem);
printf("Major revision number: %d\n", prop.major);
printf("Minor revision number: %d\n", prop.minor);
printf("Clock rate: %d\n", prop.clockRate);
printf("Texture alignment: %lu\n", prop.textureAlignment);
printf("Concurrent copy and execution: %s\n", (prop.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", prop.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (prop.kernelExecTimeoutEnabled ? "Yes" : "No"));
printf("Integrated: %s\n", (prop.integrated ? "Yes" : "No"));
printf("Mapable Host Memory: %s\n", (prop.canMapHostMemory ? "Yes" : "No"));
printf("Compute Mode: %d\n", prop.computeMode);
printf("Concurrent Kernels: %d\n", prop.concurrentKernels);
printf("ECC Enabled: %s\n", (prop.ECCEnabled ? "Yes" : "No"));
printf("pci Bus ID: %d\n", prop.pciBusID);
printf("pci Device ID: %d\n", prop.pciDeviceID);
printf("Using a tcc Driver: %s\n", (prop.tccDriver ? "Yes" : "No"));
printf("============End Device %x============\n", device_id);
}
//tier 1 method for printing all cuda devices and their properties
void print_all_CUDA_devices_and_properties() {
int device_id;
cudaGetDeviceCount( &device_id);
printf("Print of all CUDA devices and device properties\n");
for (int i = 0; i < device_id; i++){
		//cudaGetDeviceProperties fills a cudaDeviceProp struct with the device attributes
		print_all_device_properties(i);
}
}
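//Times a kernel that squares each element in place in global memory (no shared-memory staging)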
__host__ float execute_register_memory_operations(void)
{
const unsigned int num_elements = NUM_THREADS;
const unsigned int num_bytes = NUM_THREADS * sizeof(unsigned int);
unsigned int * d_data; //device data
unsigned int hi_data[num_elements]; //initial host data
unsigned int hf_data[num_elements]; //final host data
/* Set timing Metrics */
cudaEvent_t kernel_start, kernel_stop;
float delta = 0.0F;
cudaEventCreate(&kernel_start,0);
cudaEventCreate(&kernel_stop,0);
//set CUDA stream
cudaStream_t stream;
cudaStreamCreate(&stream);
//start timing metric
cudaEventRecord(kernel_start, 0);
//device memory alloc
cudaMalloc(&d_data, num_bytes);
//populate the initial host array with random data
generate_rand_data(hi_data, num_elements);
//copy from host memory to device memory
cudaMemcpy(d_data, hi_data, num_bytes, cudaMemcpyHostToDevice);
//Call GPU kernel <<<BLOCK TOTAL, THREADS TOTAL>>>
gpu_register_array_operation<<<NUM_BLOCKS, NUM_THREADS>>>(d_data, num_elements);
cudaStreamSynchronize(stream); // Wait for the GPU launched work to complete
cudaGetLastError();
//copy from device to host memory
cudaMemcpy(hf_data, d_data, num_bytes, cudaMemcpyDeviceToHost);
//end timing metric
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
cudaEventElapsedTime(&delta, kernel_start, kernel_stop);
cudaEventDestroy(kernel_start);
cudaEventDestroy(kernel_stop);
	//console print the host data after the GPU kernel
for (int i = 0; i < num_elements; i++){
printf("Input value: %x, device output: %x\n", hi_data[i], hf_data[i]);
}
//free device and host memory allocations
	cudaFree((void* ) d_data); 	//free device data
cudaFreeHost(hi_data); //free up the host memory
cudaFreeHost(hf_data); //free up the host memory
cudaDeviceReset();
return delta;
}
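//Times a kernel that stages the data in shared memory, squares it there, and writes it back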
__host__ float execute_shared_memory_operations()
{
const unsigned int num_elements = NUM_THREADS;
const unsigned int num_bytes = NUM_THREADS * sizeof(unsigned int);
unsigned int * d_data; //device data
unsigned int hi_data[num_elements]; //initial host data
unsigned int hf_data[num_elements]; //final host data
/* Set timing Metrics */
cudaEvent_t kernel_start, kernel_stop;
float delta = 0.0F;
cudaEventCreate(&kernel_start,0);
cudaEventCreate(&kernel_stop,0);
//set CUDA stream
cudaStream_t stream;
cudaStreamCreate(&stream);
//start timing metric
cudaEventRecord(kernel_start, 0);
//device memory alloc
cudaMalloc(&d_data, num_bytes);
//populate the initial host array with random data
generate_rand_data(hi_data, num_elements);
//copy from host memory to device memory
cudaMemcpy(d_data, hi_data, num_bytes, cudaMemcpyHostToDevice);
//Call GPU kernels <<<BLOCK TOTAL, THREADS TOTAL>>>
gpu_shared_array_operation<<<NUM_BLOCKS, NUM_THREADS>>>(d_data, num_elements);
//sync the cuda stream
cudaStreamSynchronize(stream); // Wait for the GPU launched work to complete
cudaGetLastError(); //error handling
//copy from device to host memory
cudaMemcpy(hf_data, d_data, num_bytes, cudaMemcpyDeviceToHost);
//end timing metric
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
cudaEventElapsedTime(&delta, kernel_start, kernel_stop);
cudaEventDestroy(kernel_start);
cudaEventDestroy(kernel_stop);
//console print the host data after the GPU kernal
for (int i = 0; i < num_elements; i++){
printf("Input value: %x, device output: %x\n", hi_data[i], hf_data[i]);
}
//free device and host memory allocations
	cudaFree((void* ) d_data); 	//free device data
cudaFreeHost(hi_data); //free up the host memory
cudaFreeHost(hf_data); //free up the host memory
cudaDeviceReset();
return delta;
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(void) {
//print all cuda devices and device properties for kicks
print_all_CUDA_devices_and_properties();
//test harness for timing some kernels using streams and events
float delta_shared = execute_shared_memory_operations();
float delta_register = execute_register_memory_operations();
//print out the results of the time executions returned by the prev methods
printf("========================\n");
printf("Summary\n");
printf("Total Threads: %d\n", NUM_THREADS);
printf("Total Blocks: %d\n", NUM_BLOCKS);
printf("========================\n");
printf("Time to copy global to shared mem, perform simple operation w/ shared memory, copy memory back to global\n");
printf("duration: %fms\n",delta_shared);
printf("========================\n");
printf("Time to copy global to register mem, perform simple operation w/ register memory, copy memory back to global\n");
printf("duration: %fms\n",delta_register);
return 0;
}
|
6bdbb608945374e97531ba42a36879bcfe7c69a6.hip | // !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
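//Each thread computes one element of C = A*B: its row of the 1024x1024 matrix A
//dotted with the 1024-element vector B, which the block first stages in shared memory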
__global__ void MatrixMulCUDA(double* C, double* A, double* B) {
int index = threadIdx.x;
__shared__ double shared_b[1024];
shared_b[index] = B[index];
__syncthreads();
    double sum = 0.0;   //accumulate in a register: d_C is never cleared on the host
    for (int i = 0; i < 1024; i++)
        sum += (A[1024 * index + i] * shared_b[i]);
    C[index] = sum;
__syncthreads();
}
void ConstantInit(double* data, int size) {
for (int i = 0; i < size; ++i) {
data[i] = i;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(const dim3& dimsA, const dim3& dimsB) {
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(double) * size_A;
double* h_A;
checkCudaErrors(hipHostMalloc(&h_A, mem_size_A));
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(double) * size_B;
double* h_B;
checkCudaErrors(hipHostMalloc(&h_B, mem_size_B));
hipStream_t stream;
ConstantInit(h_A, size_A);
ConstantInit(h_B, size_B);
// Allocate device memory
double* d_A, * d_B, * d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, 1, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(double);
double* h_C;
checkCudaErrors(hipHostMalloc(&h_C, mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&d_A), mem_size_A));
checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&d_B), mem_size_B));
checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&d_C), mem_size_C));
checkCudaErrors(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
// copy host memory to device
checkCudaErrors(hipMemcpyAsync(d_A, h_A, mem_size_A, hipMemcpyHostToDevice, stream));
checkCudaErrors(hipMemcpyAsync(d_B, h_B, mem_size_B, hipMemcpyHostToDevice, stream));
// Setup execution parameters
dim3 threads(1024);
dim3 grid(1);
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start, stream));
hipLaunchKernelGGL(( MatrixMulCUDA) , dim3(grid), dim3(threads), 0, stream , d_C, d_A, d_B);
checkCudaErrors(hipEventRecord(stop, stream));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
printf("Total time in msec: %f\n", msecTotal);
// Copy result from device to host
checkCudaErrors(hipMemcpyAsync(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost, stream));
checkCudaErrors(hipStreamSynchronize(stream));
// Clean up memory
checkCudaErrors(hipHostFree(h_A));
checkCudaErrors(hipHostFree(h_B));
checkCudaErrors(hipHostFree(h_C));
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
return 0;
}
int main(int argc, char** argv) {
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
int dev = findCudaDevice(argc, (const char**)argv);
int oneK = 1024;
dim3 dimsA(oneK, oneK, 1);
dim3 dimsB(oneK, 1, 1);
int matrix_result = MatrixMultiply(dimsA, dimsB);
exit(matrix_result);
}
| 6bdbb608945374e97531ba42a36879bcfe7c69a6.cu | // System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
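//Each thread computes one element of C = A*B: its row of the 1024x1024 matrix A
//dotted with the 1024-element vector B, which the block first stages in shared memory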
__global__ void MatrixMulCUDA(double* C, double* A, double* B) {
int index = threadIdx.x;
__shared__ double shared_b[1024];
shared_b[index] = B[index];
__syncthreads();
    double sum = 0.0;   //accumulate in a register: d_C is never cleared on the host
    for (int i = 0; i < 1024; i++)
        sum += (A[1024 * index + i] * shared_b[i]);
    C[index] = sum;
__syncthreads();
}
void ConstantInit(double* data, int size) {
for (int i = 0; i < size; ++i) {
data[i] = i;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(const dim3& dimsA, const dim3& dimsB) {
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(double) * size_A;
double* h_A;
checkCudaErrors(cudaMallocHost(&h_A, mem_size_A));
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(double) * size_B;
double* h_B;
checkCudaErrors(cudaMallocHost(&h_B, mem_size_B));
cudaStream_t stream;
ConstantInit(h_A, size_A);
ConstantInit(h_B, size_B);
// Allocate device memory
double* d_A, * d_B, * d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, 1, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(double);
double* h_C;
checkCudaErrors(cudaMallocHost(&h_C, mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&d_A), mem_size_A));
checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&d_B), mem_size_B));
checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&d_C), mem_size_C));
checkCudaErrors(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
// copy host memory to device
checkCudaErrors(cudaMemcpyAsync(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice, stream));
checkCudaErrors(cudaMemcpyAsync(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice, stream));
// Setup execution parameters
dim3 threads(1024);
dim3 grid(1);
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start, stream));
MatrixMulCUDA <<< grid, threads, 0, stream >>> (d_C, d_A, d_B);
checkCudaErrors(cudaEventRecord(stop, stream));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
printf("Total time in msec: %f\n", msecTotal);
// Copy result from device to host
checkCudaErrors(cudaMemcpyAsync(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost, stream));
checkCudaErrors(cudaStreamSynchronize(stream));
// Clean up memory
checkCudaErrors(cudaFreeHost(h_A));
checkCudaErrors(cudaFreeHost(h_B));
checkCudaErrors(cudaFreeHost(h_C));
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
return 0;
}
int main(int argc, char** argv) {
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
int dev = findCudaDevice(argc, (const char**)argv);
int oneK = 1024;
dim3 dimsA(oneK, oneK, 1);
dim3 dimsB(oneK, 1, 1);
int matrix_result = MatrixMultiply(dimsA, dimsB);
exit(matrix_result);
}
|
8e7901d82dcff111e867e34643ed12bec289c1f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// *** DO NOT EDIT ***
//
// This test has been automatically generated by
// builtins-nvtx-mma.py --ptx=63 --gpu-arch=75
//
// Make sure we can handle all builtins available on sm_75 with PTX63
// RUN: %clang_cc1 -triple nvptx64-unknown-unknown -target-cpu sm_75 \
// RUN: -fcuda-is-device -target-feature +ptx63 \
// RUN: -DPTX=63 -DSM=75 \
// RUN: -S -emit-llvm -o - -x cuda %s \
// RUN: | FileCheck -check-prefixes=CHECK_PTX61_SM70,CHECK_PTX63_SM75,CHECK_PTX63_SM72,CHECK_PTX60_SM70 %s
// Verify that all builtins have correct constraints.
// RUN: %clang_cc1 -triple nvptx-unknown-unknown \
// RUN: -target-cpu sm_60 -target-feature +ptx42 \
// RUN: -DPTX=63 -DSM=75 -fcuda-is-device -S -o /dev/null -x cuda \
// RUN: -verify %s
#if !defined(TORCH_HIP_VERSION)
#define __device__ __attribute__((device))
#define __global__ __attribute__((global))
#define __shared__ __attribute__((shared))
#define __constant__ __attribute__((constant))
typedef unsigned long long uint64_t;
#endif
// CHECK-LABEL: test_wmma_buitins
__device__ void test_wmma_buitins(int *src, int *dst,
float *fsrc, float *fdst, int ldm) {
#if (PTX >= 60) && (SM >= 70)
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_ld_a' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_ld_a(dst, src, ldm, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_ld_a' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_ld_a(dst, src, ldm, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_ld_b' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_ld_b(dst, src, ldm, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_ld_b' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_ld_b(dst, src, ldm, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_ld_c_f16(dst, src, ldm, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_ld_c_f16(dst, src, ldm, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_ld_c_f32(fdst, fsrc, ldm, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_ld_c_f32(fdst, fsrc, ldm, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_st_c_f16(dst, src, ldm, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_st_c_f16(dst, src, ldm, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_st_c_f32(fdst, fsrc, ldm, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_st_c_f32(fdst, fsrc, ldm, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f16(dst, src, src, src, 3, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f16(dst, src, src, src, 3, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f16(dst, src, src, src, 2, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f16(dst, src, src, src, 2, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f16(dst, src, src, src, 1, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f16(dst, src, src, src, 1, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f16(dst, src, src, src, 0, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f16(dst, src, src, src, 0, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 3, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 3, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 2, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 2, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 1, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 1, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 0, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 0, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 3, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 3, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 2, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 2, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 1, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 1, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 0, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 0, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 3, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 3, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 2, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 2, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 1, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 1, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 0, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 0, 1);
#endif // (PTX >= 60) && (SM >= 70)
#if (PTX >= 61) && (SM >= 70)
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_ld_a' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_ld_a(dst, src, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_ld_a' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_ld_a(dst, src, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_ld_b' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_ld_b(dst, src, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_ld_b' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_ld_b(dst, src, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_ld_c_f16(dst, src, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_ld_c_f16(dst, src, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_ld_c_f32(fdst, fsrc, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_ld_c_f32(fdst, fsrc, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_st_c_f16(dst, src, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_st_c_f16(dst, src, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_st_c_f32(fdst, fsrc, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_st_c_f32(fdst, fsrc, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_ld_a' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_ld_a(dst, src, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_ld_a' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_ld_a(dst, src, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_ld_b' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_ld_b(dst, src, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_ld_b' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_ld_b(dst, src, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_ld_c_f16(dst, src, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_ld_c_f16(dst, src, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_ld_c_f32(fdst, fsrc, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_ld_c_f32(fdst, fsrc, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_st_c_f16(dst, src, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_st_c_f16(dst, src, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_st_c_f32(fdst, fsrc, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_st_c_f32(fdst, fsrc, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f16(dst, src, src, src, 3, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f16(dst, src, src, src, 3, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f16(dst, src, src, src, 2, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f16(dst, src, src, src, 2, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f16(dst, src, src, src, 1, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f16(dst, src, src, src, 1, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f16(dst, src, src, src, 0, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f16(dst, src, src, src, 0, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 3, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 3, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 2, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 2, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 1, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 1, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 0, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 0, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 3, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 3, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 2, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 2, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 1, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 1, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 0, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 0, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 3, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 3, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 2, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 2, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 1, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 1, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 0, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 0, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f16(dst, src, src, src, 3, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f16(dst, src, src, src, 3, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f16(dst, src, src, src, 2, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f16(dst, src, src, src, 2, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f16(dst, src, src, src, 1, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f16(dst, src, src, src, 1, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f16(dst, src, src, src, 0, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f16(dst, src, src, src, 0, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 3, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 3, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 2, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 2, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 1, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 1, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 0, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 0, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 3, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 3, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 2, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 2, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 1, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 1, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 0, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 0, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 3, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 3, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 2, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 2, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 1, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 1, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 0, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 0, 1);
#endif // (PTX >= 61) && (SM >= 70)
#if (PTX >= 63) && (SM >= 72)
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.s8
// expected-error-re@+1 {{'__imma_m16n16k16_ld_a_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_a_s8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.s8
// expected-error-re@+1 {{'__imma_m16n16k16_ld_a_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_a_s8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.u8
// expected-error-re@+1 {{'__imma_m16n16k16_ld_a_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_a_u8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.u8
// expected-error-re@+1 {{'__imma_m16n16k16_ld_a_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_a_u8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.s8
// expected-error-re@+1 {{'__imma_m16n16k16_ld_b_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_b_s8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.s8
// expected-error-re@+1 {{'__imma_m16n16k16_ld_b_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_b_s8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.u8
// expected-error-re@+1 {{'__imma_m16n16k16_ld_b_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_b_u8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.u8
// expected-error-re@+1 {{'__imma_m16n16k16_ld_b_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_b_u8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.col.stride.s32
// expected-error-re@+1 {{'__imma_m16n16k16_ld_c' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_c(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.row.stride.s32
// expected-error-re@+1 {{'__imma_m16n16k16_ld_c' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_c(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.col.stride.s32
// expected-error-re@+1 {{'__imma_m16n16k16_st_c_i32' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_st_c_i32(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.s32
// expected-error-re@+1 {{'__imma_m16n16k16_st_c_i32' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_st_c_i32(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.s8
// expected-error-re@+1 {{'__imma_m32n8k16_ld_a_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_a_s8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.s8
// expected-error-re@+1 {{'__imma_m32n8k16_ld_a_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_a_s8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.u8
// expected-error-re@+1 {{'__imma_m32n8k16_ld_a_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_a_u8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.u8
// expected-error-re@+1 {{'__imma_m32n8k16_ld_a_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_a_u8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.s8
// expected-error-re@+1 {{'__imma_m32n8k16_ld_b_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_b_s8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.s8
// expected-error-re@+1 {{'__imma_m32n8k16_ld_b_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_b_s8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.u8
// expected-error-re@+1 {{'__imma_m32n8k16_ld_b_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_b_u8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.u8
// expected-error-re@+1 {{'__imma_m32n8k16_ld_b_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_b_u8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.col.stride.s32
// expected-error-re@+1 {{'__imma_m32n8k16_ld_c' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_c(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.row.stride.s32
// expected-error-re@+1 {{'__imma_m32n8k16_ld_c' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_c(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.col.stride.s32
// expected-error-re@+1 {{'__imma_m32n8k16_st_c_i32' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_st_c_i32(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.row.stride.s32
// expected-error-re@+1 {{'__imma_m32n8k16_st_c_i32' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_st_c_i32(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.s8
// expected-error-re@+1 {{'__imma_m8n32k16_ld_a_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_a_s8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.s8
// expected-error-re@+1 {{'__imma_m8n32k16_ld_a_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_a_s8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.u8
// expected-error-re@+1 {{'__imma_m8n32k16_ld_a_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_a_u8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.u8
// expected-error-re@+1 {{'__imma_m8n32k16_ld_a_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_a_u8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.s8
// expected-error-re@+1 {{'__imma_m8n32k16_ld_b_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_b_s8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.s8
// expected-error-re@+1 {{'__imma_m8n32k16_ld_b_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_b_s8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.u8
// expected-error-re@+1 {{'__imma_m8n32k16_ld_b_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_b_u8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.u8
// expected-error-re@+1 {{'__imma_m8n32k16_ld_b_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_b_u8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.col.stride.s32
// expected-error-re@+1 {{'__imma_m8n32k16_ld_c' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_c(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.row.stride.s32
// expected-error-re@+1 {{'__imma_m8n32k16_ld_c' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_c(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.col.stride.s32
// expected-error-re@+1 {{'__imma_m8n32k16_st_c_i32' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_st_c_i32(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.row.stride.s32
// expected-error-re@+1 {{'__imma_m8n32k16_st_c_i32' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_st_c_i32(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.s8
// expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_s8(dst, src, src, src, 3, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.s8.satfinite
// expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_s8(dst, src, src, src, 3, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.s8
// expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_s8(dst, src, src, src, 2, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.s8.satfinite
// expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_s8(dst, src, src, src, 2, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.s8
// expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_s8(dst, src, src, src, 1, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.s8.satfinite
// expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_s8(dst, src, src, src, 1, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.s8
// expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_s8(dst, src, src, src, 0, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.s8.satfinite
// expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_s8(dst, src, src, src, 0, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.u8
// expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_u8(dst, src, src, src, 3, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.u8.satfinite
// expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_u8(dst, src, src, src, 3, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.u8
// expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_u8(dst, src, src, src, 2, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.u8.satfinite
// expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_u8(dst, src, src, src, 2, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.u8
// expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_u8(dst, src, src, src, 1, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.u8.satfinite
// expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_u8(dst, src, src, src, 1, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.u8
// expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_u8(dst, src, src, src, 0, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.u8.satfinite
// expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_u8(dst, src, src, src, 0, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.s8
// expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_s8(dst, src, src, src, 3, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.s8.satfinite
// expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_s8(dst, src, src, src, 3, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.s8
// expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_s8(dst, src, src, src, 2, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.s8.satfinite
// expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_s8(dst, src, src, src, 2, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.s8
// expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_s8(dst, src, src, src, 1, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.s8.satfinite
// expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_s8(dst, src, src, src, 1, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.s8
// expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_s8(dst, src, src, src, 0, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.s8.satfinite
// expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_s8(dst, src, src, src, 0, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.u8
// expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_u8(dst, src, src, src, 3, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.u8.satfinite
// expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_u8(dst, src, src, src, 3, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.u8
// expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_u8(dst, src, src, src, 2, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.u8.satfinite
// expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_u8(dst, src, src, src, 2, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.u8
// expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_u8(dst, src, src, src, 1, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.u8.satfinite
// expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_u8(dst, src, src, src, 1, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.u8
// expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_u8(dst, src, src, src, 0, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.u8.satfinite
// expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_u8(dst, src, src, src, 0, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.s8
// expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_s8(dst, src, src, src, 3, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.s8.satfinite
// expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_s8(dst, src, src, src, 3, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.s8
// expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_s8(dst, src, src, src, 2, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.s8.satfinite
// expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_s8(dst, src, src, src, 2, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.s8
// expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_s8(dst, src, src, src, 1, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.s8.satfinite
// expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_s8(dst, src, src, src, 1, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.s8
// expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_s8(dst, src, src, src, 0, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.s8.satfinite
// expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_s8(dst, src, src, src, 0, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.u8
// expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_u8(dst, src, src, src, 3, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.u8.satfinite
// expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_u8(dst, src, src, src, 3, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.u8
// expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_u8(dst, src, src, src, 2, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.u8.satfinite
// expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_u8(dst, src, src, src, 2, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.u8
// expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_u8(dst, src, src, src, 1, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.u8.satfinite
// expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_u8(dst, src, src, src, 1, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.u8
// expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_u8(dst, src, src, src, 0, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.u8.satfinite
// expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_u8(dst, src, src, src, 0, 1);
#endif // (PTX >= 63) && (SM >= 72)
#if (PTX >= 63) && (SM >= 75)
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.a.row.stride.b1
// expected-error-re@+1 {{'__bmma_m8n8k128_ld_a_b1' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__bmma_m8n8k128_ld_a_b1(dst, src, ldm, 0);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.b.col.stride.b1
// expected-error-re@+1 {{'__bmma_m8n8k128_ld_b_b1' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__bmma_m8n8k128_ld_b_b1(dst, src, ldm, 1);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.c.col.stride.s32
// expected-error-re@+1 {{'__bmma_m8n8k128_ld_c' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__bmma_m8n8k128_ld_c(dst, src, ldm, 1);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.c.row.stride.s32
// expected-error-re@+1 {{'__bmma_m8n8k128_ld_c' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__bmma_m8n8k128_ld_c(dst, src, ldm, 0);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.store.d.col.stride.s32
// expected-error-re@+1 {{'__bmma_m8n8k128_st_c_i32' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__bmma_m8n8k128_st_c_i32(dst, src, ldm, 1);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.store.d.row.stride.s32
// expected-error-re@+1 {{'__bmma_m8n8k128_st_c_i32' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__bmma_m8n8k128_st_c_i32(dst, src, ldm, 0);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.a.row.stride.s4
// expected-error-re@+1 {{'__imma_m8n8k32_ld_a_s4' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_ld_a_s4(dst, src, ldm, 0);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.a.row.stride.u4
// expected-error-re@+1 {{'__imma_m8n8k32_ld_a_u4' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_ld_a_u4(dst, src, ldm, 0);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.b.col.stride.s4
// expected-error-re@+1 {{'__imma_m8n8k32_ld_b_s4' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_ld_b_s4(dst, src, ldm, 1);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.b.col.stride.u4
// expected-error-re@+1 {{'__imma_m8n8k32_ld_b_u4' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_ld_b_u4(dst, src, ldm, 1);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.c.col.stride.s32
// expected-error-re@+1 {{'__imma_m8n8k32_ld_c' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_ld_c(dst, src, ldm, 1);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.c.row.stride.s32
// expected-error-re@+1 {{'__imma_m8n8k32_ld_c' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_ld_c(dst, src, ldm, 0);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.store.d.col.stride.s32
// expected-error-re@+1 {{'__imma_m8n8k32_st_c_i32' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_st_c_i32(dst, src, ldm, 1);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.store.d.row.stride.s32
// expected-error-re@+1 {{'__imma_m8n8k32_st_c_i32' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_st_c_i32(dst, src, ldm, 0);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.mma.row.col.b1
// expected-error-re@+1 {{'__bmma_m8n8k128_mma_xor_popc_b1' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__bmma_m8n8k128_mma_xor_popc_b1(dst, src, src, src, 1);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.s4
// expected-error-re@+1 {{'__imma_m8n8k32_mma_s4' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_mma_s4(dst, src, src, src, 1, 0);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.s4.satfinite
// expected-error-re@+1 {{'__imma_m8n8k32_mma_s4' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_mma_s4(dst, src, src, src, 1, 1);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.u4
// expected-error-re@+1 {{'__imma_m8n8k32_mma_u4' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_mma_u4(dst, src, src, src, 1, 0);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.u4.satfinite
// expected-error-re@+1 {{'__imma_m8n8k32_mma_u4' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_mma_u4(dst, src, src, src, 1, 1);
#endif // (PTX >= 63) && (SM >= 75)
}
| 8e7901d82dcff111e867e34643ed12bec289c1f3.cu |
//
// *** DO NOT EDIT ***
//
// This test has been automatically generated by
// builtins-nvtx-mma.py --ptx=63 --gpu-arch=75
//
// Make sure we can handle all builtins available on sm_75 with PTX63
// RUN: %clang_cc1 -triple nvptx64-unknown-unknown -target-cpu sm_75 \
// RUN: -fcuda-is-device -target-feature +ptx63 \
// RUN: -DPTX=63 -DSM=75 \
// RUN: -S -emit-llvm -o - -x cuda %s \
// RUN: | FileCheck -check-prefixes=CHECK_PTX61_SM70,CHECK_PTX63_SM75,CHECK_PTX63_SM72,CHECK_PTX60_SM70 %s
// Verify that all builtins have correct constraints.
// RUN: %clang_cc1 -triple nvptx-unknown-unknown \
// RUN: -target-cpu sm_60 -target-feature +ptx42 \
// RUN: -DPTX=63 -DSM=75 -fcuda-is-device -S -o /dev/null -x cuda \
// RUN: -verify %s
#if !defined(CUDA_VERSION)
#define __device__ __attribute__((device))
#define __global__ __attribute__((global))
#define __shared__ __attribute__((shared))
#define __constant__ __attribute__((constant))
typedef unsigned long long uint64_t;
#endif
// CHECK-LABEL: test_wmma_buitins
__device__ void test_wmma_buitins(int *src, int *dst,
float *fsrc, float *fdst, int ldm) {
#if (PTX >= 60) && (SM >= 70)
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_ld_a' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_ld_a(dst, src, ldm, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_ld_a' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_ld_a(dst, src, ldm, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_ld_b' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_ld_b(dst, src, ldm, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_ld_b' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_ld_b(dst, src, ldm, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_ld_c_f16(dst, src, ldm, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_ld_c_f16(dst, src, ldm, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_ld_c_f32(fdst, fsrc, ldm, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_ld_c_f32(fdst, fsrc, ldm, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_st_c_f16(dst, src, ldm, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_st_c_f16(dst, src, ldm, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_st_c_f32(fdst, fsrc, ldm, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_st_c_f32(fdst, fsrc, ldm, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f16(dst, src, src, src, 3, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f16(dst, src, src, src, 3, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f16(dst, src, src, src, 2, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f16(dst, src, src, src, 2, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f16(dst, src, src, src, 1, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f16(dst, src, src, src, 1, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f16(dst, src, src, src, 0, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f16(dst, src, src, src, 0, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 3, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 3, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 2, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 2, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 1, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 1, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 0, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 0, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 3, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 3, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 2, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 2, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 1, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 1, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 0, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 0, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 3, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 3, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 2, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 2, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 1, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 1, 1);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 0, 0);
// CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}}
__hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 0, 1);
#endif // (PTX >= 60) && (SM >= 70)
#if (PTX >= 61) && (SM >= 70)
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_ld_a' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_ld_a(dst, src, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_ld_a' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_ld_a(dst, src, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_ld_b' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_ld_b(dst, src, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_ld_b' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_ld_b(dst, src, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_ld_c_f16(dst, src, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_ld_c_f16(dst, src, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_ld_c_f32(fdst, fsrc, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_ld_c_f32(fdst, fsrc, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_st_c_f16(dst, src, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_st_c_f16(dst, src, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_st_c_f32(fdst, fsrc, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_st_c_f32(fdst, fsrc, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_ld_a' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_ld_a(dst, src, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_ld_a' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_ld_a(dst, src, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_ld_b' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_ld_b(dst, src, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_ld_b' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_ld_b(dst, src, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_ld_c_f16(dst, src, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_ld_c_f16(dst, src, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_ld_c_f32(fdst, fsrc, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_ld_c_f32(fdst, fsrc, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_st_c_f16(dst, src, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_st_c_f16(dst, src, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_st_c_f32(fdst, fsrc, ldm, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_st_c_f32(fdst, fsrc, ldm, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f16(dst, src, src, src, 3, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f16(dst, src, src, src, 3, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f16(dst, src, src, src, 2, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f16(dst, src, src, src, 2, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f16(dst, src, src, src, 1, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f16(dst, src, src, src, 1, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f16(dst, src, src, src, 0, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f16(dst, src, src, src, 0, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 3, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 3, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 2, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 2, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 1, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 1, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f16
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 0, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 0, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 3, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 3, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 2, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 2, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 1, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 1, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 0, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 0, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 3, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 3, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 2, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 2, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 1, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 1, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f32
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 0, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 0, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f16(dst, src, src, src, 3, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f16(dst, src, src, src, 3, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f16(dst, src, src, src, 2, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f16(dst, src, src, src, 2, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f16(dst, src, src, src, 1, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f16(dst, src, src, src, 1, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f16(dst, src, src, src, 0, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f16.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f16(dst, src, src, src, 0, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 3, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 3, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 2, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 2, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 1, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 1, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f16
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 0, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f16.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 0, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 3, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 3, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 2, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 2, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 1, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 1, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 0, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f32.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 0, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 3, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 3, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 2, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 2, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 1, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 1, 1);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f32
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 0, 0);
// CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f32.satfinite
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}}
__hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 0, 1);
#endif // (PTX >= 61) && (SM >= 70)
#if (PTX >= 63) && (SM >= 72)
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.s8
// expected-error-re@+1 {{'__imma_m16n16k16_ld_a_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_a_s8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.s8
// expected-error-re@+1 {{'__imma_m16n16k16_ld_a_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_a_s8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.u8
// expected-error-re@+1 {{'__imma_m16n16k16_ld_a_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_a_u8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.u8
// expected-error-re@+1 {{'__imma_m16n16k16_ld_a_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_a_u8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.s8
// expected-error-re@+1 {{'__imma_m16n16k16_ld_b_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_b_s8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.s8
// expected-error-re@+1 {{'__imma_m16n16k16_ld_b_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_b_s8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.u8
// expected-error-re@+1 {{'__imma_m16n16k16_ld_b_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_b_u8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.u8
// expected-error-re@+1 {{'__imma_m16n16k16_ld_b_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_b_u8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.col.stride.s32
// expected-error-re@+1 {{'__imma_m16n16k16_ld_c' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_c(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.row.stride.s32
// expected-error-re@+1 {{'__imma_m16n16k16_ld_c' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_ld_c(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.col.stride.s32
// expected-error-re@+1 {{'__imma_m16n16k16_st_c_i32' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_st_c_i32(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.s32
// expected-error-re@+1 {{'__imma_m16n16k16_st_c_i32' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_st_c_i32(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.s8
// expected-error-re@+1 {{'__imma_m32n8k16_ld_a_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_a_s8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.s8
// expected-error-re@+1 {{'__imma_m32n8k16_ld_a_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_a_s8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.u8
// expected-error-re@+1 {{'__imma_m32n8k16_ld_a_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_a_u8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.u8
// expected-error-re@+1 {{'__imma_m32n8k16_ld_a_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_a_u8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.s8
// expected-error-re@+1 {{'__imma_m32n8k16_ld_b_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_b_s8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.s8
// expected-error-re@+1 {{'__imma_m32n8k16_ld_b_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_b_s8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.u8
// expected-error-re@+1 {{'__imma_m32n8k16_ld_b_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_b_u8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.u8
// expected-error-re@+1 {{'__imma_m32n8k16_ld_b_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_b_u8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.col.stride.s32
// expected-error-re@+1 {{'__imma_m32n8k16_ld_c' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_c(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.row.stride.s32
// expected-error-re@+1 {{'__imma_m32n8k16_ld_c' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_ld_c(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.col.stride.s32
// expected-error-re@+1 {{'__imma_m32n8k16_st_c_i32' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_st_c_i32(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.row.stride.s32
// expected-error-re@+1 {{'__imma_m32n8k16_st_c_i32' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_st_c_i32(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.s8
// expected-error-re@+1 {{'__imma_m8n32k16_ld_a_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_a_s8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.s8
// expected-error-re@+1 {{'__imma_m8n32k16_ld_a_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_a_s8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.u8
// expected-error-re@+1 {{'__imma_m8n32k16_ld_a_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_a_u8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.u8
// expected-error-re@+1 {{'__imma_m8n32k16_ld_a_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_a_u8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.s8
// expected-error-re@+1 {{'__imma_m8n32k16_ld_b_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_b_s8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.s8
// expected-error-re@+1 {{'__imma_m8n32k16_ld_b_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_b_s8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.u8
// expected-error-re@+1 {{'__imma_m8n32k16_ld_b_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_b_u8(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.u8
// expected-error-re@+1 {{'__imma_m8n32k16_ld_b_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_b_u8(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.col.stride.s32
// expected-error-re@+1 {{'__imma_m8n32k16_ld_c' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_c(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.row.stride.s32
// expected-error-re@+1 {{'__imma_m8n32k16_ld_c' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_ld_c(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.col.stride.s32
// expected-error-re@+1 {{'__imma_m8n32k16_st_c_i32' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_st_c_i32(dst, src, ldm, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.row.stride.s32
// expected-error-re@+1 {{'__imma_m8n32k16_st_c_i32' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_st_c_i32(dst, src, ldm, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.s8
// expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_s8(dst, src, src, src, 3, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.s8.satfinite
// expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_s8(dst, src, src, src, 3, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.s8
// expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_s8(dst, src, src, src, 2, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.s8.satfinite
// expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_s8(dst, src, src, src, 2, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.s8
// expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_s8(dst, src, src, src, 1, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.s8.satfinite
// expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_s8(dst, src, src, src, 1, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.s8
// expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_s8(dst, src, src, src, 0, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.s8.satfinite
// expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_s8(dst, src, src, src, 0, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.u8
// expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_u8(dst, src, src, src, 3, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.u8.satfinite
// expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_u8(dst, src, src, src, 3, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.u8
// expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_u8(dst, src, src, src, 2, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.u8.satfinite
// expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_u8(dst, src, src, src, 2, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.u8
// expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_u8(dst, src, src, src, 1, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.u8.satfinite
// expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_u8(dst, src, src, src, 1, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.u8
// expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_u8(dst, src, src, src, 0, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.u8.satfinite
// expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m16n16k16_mma_u8(dst, src, src, src, 0, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.s8
// expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_s8(dst, src, src, src, 3, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.s8.satfinite
// expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_s8(dst, src, src, src, 3, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.s8
// expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_s8(dst, src, src, src, 2, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.s8.satfinite
// expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_s8(dst, src, src, src, 2, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.s8
// expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_s8(dst, src, src, src, 1, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.s8.satfinite
// expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_s8(dst, src, src, src, 1, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.s8
// expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_s8(dst, src, src, src, 0, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.s8.satfinite
// expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_s8(dst, src, src, src, 0, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.u8
// expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_u8(dst, src, src, src, 3, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.u8.satfinite
// expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_u8(dst, src, src, src, 3, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.u8
// expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_u8(dst, src, src, src, 2, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.u8.satfinite
// expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_u8(dst, src, src, src, 2, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.u8
// expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_u8(dst, src, src, src, 1, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.u8.satfinite
// expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_u8(dst, src, src, src, 1, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.u8
// expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_u8(dst, src, src, src, 0, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.u8.satfinite
// expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m32n8k16_mma_u8(dst, src, src, src, 0, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.s8
// expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_s8(dst, src, src, src, 3, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.s8.satfinite
// expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_s8(dst, src, src, src, 3, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.s8
// expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_s8(dst, src, src, src, 2, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.s8.satfinite
// expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_s8(dst, src, src, src, 2, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.s8
// expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_s8(dst, src, src, src, 1, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.s8.satfinite
// expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_s8(dst, src, src, src, 1, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.s8
// expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_s8(dst, src, src, src, 0, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.s8.satfinite
// expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_s8(dst, src, src, src, 0, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.u8
// expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_u8(dst, src, src, src, 3, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.u8.satfinite
// expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_u8(dst, src, src, src, 3, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.u8
// expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_u8(dst, src, src, src, 2, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.u8.satfinite
// expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_u8(dst, src, src, src, 2, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.u8
// expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_u8(dst, src, src, src, 1, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.u8.satfinite
// expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_u8(dst, src, src, src, 1, 1);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.u8
// expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_u8(dst, src, src, src, 0, 0);
// CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.u8.satfinite
// expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature sm_72{{.*}},ptx63{{.*}}}}
__imma_m8n32k16_mma_u8(dst, src, src, src, 0, 1);
#endif // (PTX >= 63) && (SM >= 72)
#if (PTX >= 63) && (SM >= 75)
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.a.row.stride.b1
// expected-error-re@+1 {{'__bmma_m8n8k128_ld_a_b1' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__bmma_m8n8k128_ld_a_b1(dst, src, ldm, 0);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.b.col.stride.b1
// expected-error-re@+1 {{'__bmma_m8n8k128_ld_b_b1' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__bmma_m8n8k128_ld_b_b1(dst, src, ldm, 1);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.c.col.stride.s32
// expected-error-re@+1 {{'__bmma_m8n8k128_ld_c' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__bmma_m8n8k128_ld_c(dst, src, ldm, 1);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.c.row.stride.s32
// expected-error-re@+1 {{'__bmma_m8n8k128_ld_c' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__bmma_m8n8k128_ld_c(dst, src, ldm, 0);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.store.d.col.stride.s32
// expected-error-re@+1 {{'__bmma_m8n8k128_st_c_i32' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__bmma_m8n8k128_st_c_i32(dst, src, ldm, 1);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.store.d.row.stride.s32
// expected-error-re@+1 {{'__bmma_m8n8k128_st_c_i32' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__bmma_m8n8k128_st_c_i32(dst, src, ldm, 0);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.a.row.stride.s4
// expected-error-re@+1 {{'__imma_m8n8k32_ld_a_s4' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_ld_a_s4(dst, src, ldm, 0);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.a.row.stride.u4
// expected-error-re@+1 {{'__imma_m8n8k32_ld_a_u4' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_ld_a_u4(dst, src, ldm, 0);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.b.col.stride.s4
// expected-error-re@+1 {{'__imma_m8n8k32_ld_b_s4' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_ld_b_s4(dst, src, ldm, 1);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.b.col.stride.u4
// expected-error-re@+1 {{'__imma_m8n8k32_ld_b_u4' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_ld_b_u4(dst, src, ldm, 1);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.c.col.stride.s32
// expected-error-re@+1 {{'__imma_m8n8k32_ld_c' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_ld_c(dst, src, ldm, 1);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.c.row.stride.s32
// expected-error-re@+1 {{'__imma_m8n8k32_ld_c' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_ld_c(dst, src, ldm, 0);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.store.d.col.stride.s32
// expected-error-re@+1 {{'__imma_m8n8k32_st_c_i32' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_st_c_i32(dst, src, ldm, 1);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.store.d.row.stride.s32
// expected-error-re@+1 {{'__imma_m8n8k32_st_c_i32' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_st_c_i32(dst, src, ldm, 0);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.mma.row.col.b1
// expected-error-re@+1 {{'__bmma_m8n8k128_mma_xor_popc_b1' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__bmma_m8n8k128_mma_xor_popc_b1(dst, src, src, src, 1);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.s4
// expected-error-re@+1 {{'__imma_m8n8k32_mma_s4' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_mma_s4(dst, src, src, src, 1, 0);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.s4.satfinite
// expected-error-re@+1 {{'__imma_m8n8k32_mma_s4' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_mma_s4(dst, src, src, src, 1, 1);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.u4
// expected-error-re@+1 {{'__imma_m8n8k32_mma_u4' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_mma_u4(dst, src, src, src, 1, 0);
// CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.u4.satfinite
// expected-error-re@+1 {{'__imma_m8n8k32_mma_u4' needs target feature sm_75{{.*}},ptx63{{.*}}}}
__imma_m8n8k32_mma_u4(dst, src, src, src, 1, 1);
#endif // (PTX >= 63) && (SM >= 75)
}
|
0c78f330ee84ab04eeae0f85f8562aad013c39bf.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vcopyfrom.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
const int shift = 1;
const double *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
double *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
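// Round iXSIZE/iYSIZE up to the next multiple of the block dimensions so the grid fully covers the matrix.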
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(vcopyfrom, dim3(gridBlock), dim3(threadBlock), 0, 0, n, shift, a, b);
hipDeviceSynchronize();
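// Warm-up launches, excluded from timing, to absorb one-time initialization overhead.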
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(vcopyfrom, dim3(gridBlock), dim3(threadBlock), 0, 0, n, shift, a, b);
}
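// Timed region: 1000 launches measured with std::chrono; note there is no device synchronization before the end timestamp.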
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(vcopyfrom, dim3(gridBlock), dim3(threadBlock), 0, 0, n, shift, a, b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0c78f330ee84ab04eeae0f85f8562aad013c39bf.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vcopyfrom.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
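// Auto-generated benchmark harness: for each matrix size in matrices_ (up to argv[1] of them)
// and each block shape in blocks_, main() warms the vcopyfrom kernel up, then times 1000
// launches and prints [microseconds, (block dims), (matrix dims)].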
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
const int shift = 1;
const double *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
double *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vcopyfrom<<<gridBlock,threadBlock>>>(n,shift,a,b);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vcopyfrom<<<gridBlock,threadBlock>>>(n,shift,a,b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vcopyfrom<<<gridBlock,threadBlock>>>(n,shift,a,b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
fb1dbca269101ed2bb5a41948f5025fc10bddd17.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "add_force.h"
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/pvs/views/pv.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/kernel_launch.h>
namespace mirheo
{
namespace add_force_kernels
{
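/// Adds the constant force to every particle's force accumulator in the view.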
__global__ void addForce(PVview view, real3 force)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid >= view.size) return;
view.forces[gid] += make_real4(force, 0.0_r);
}
} // namespace add_force_kernels
AddForcePlugin::AddForcePlugin(const MirState *state, const std::string& name, const std::string& pvName, real3 force) :
SimulationPlugin(state, name),
pvName_(pvName),
force_(force)
{}
void AddForcePlugin::setup(Simulation *simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
SimulationPlugin::setup(simulation, comm, interComm);
pv_ = simulation->getPVbyNameOrDie(pvName_);
}
void AddForcePlugin::beforeForces(hipStream_t stream)
{
PVview view(pv_, pv_->local());
const int nthreads = 128;
SAFE_KERNEL_LAUNCH(
add_force_kernels::addForce,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, force_ );
}
} // namespace mirheo
| fb1dbca269101ed2bb5a41948f5025fc10bddd17.cu | #include "add_force.h"
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/pvs/views/pv.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/kernel_launch.h>
namespace mirheo
{
namespace add_force_kernels
{
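/// Adds the constant force to every particle's force accumulator in the view.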
__global__ void addForce(PVview view, real3 force)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid >= view.size) return;
view.forces[gid] += make_real4(force, 0.0_r);
}
} // namespace add_force_kernels
AddForcePlugin::AddForcePlugin(const MirState *state, const std::string& name, const std::string& pvName, real3 force) :
SimulationPlugin(state, name),
pvName_(pvName),
force_(force)
{}
void AddForcePlugin::setup(Simulation *simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
SimulationPlugin::setup(simulation, comm, interComm);
pv_ = simulation->getPVbyNameOrDie(pvName_);
}
void AddForcePlugin::beforeForces(cudaStream_t stream)
{
PVview view(pv_, pv_->local());
const int nthreads = 128;
SAFE_KERNEL_LAUNCH(
add_force_kernels::addForce,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, force_ );
}
} // namespace mirheo
|
1af3230e55d6f3086bbabd5d0e885fc31b6b979d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Seung Lee - A01021720
//Matrix multiplication with tiles
//We reuse some of the code from a-1
#include "common.h"
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include <chrono>
#include <iostream>
using namespace std;
//Professor: read the report to see why I left it at 1700 :(
#define NSize 1700 //We define the size of our N x N matrix
#define TILESize 32 //Change TILESize here so it changes throughout the code
void fillMat(float * ip, const int size) { //Function to fill our matrices (done like the in-class matrix_sum_1d example)
for(int i = 0; i < size; i++) {
ip[i] = (rand() / (float)RAND_MAX * 10.0f); //Fill them with random floats between 0 and 10
// ip[i] = i;
}
}
// naive kernel: 2D grid of 2D blocks, one thread per output element
__global__ void multMatrixOnGPU2D(float *A, float *B, float *C, int nx, int ny)
{
//Code from class
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
// unsigned int idx = iy * nx + ix;
if (ix < nx && iy < ny) {
for(int i = 0; i < ny; i++) {
C[ix*ny+iy] += A[ix*ny+i] * B[i*ny+iy];
}
}
// float temp = 0;
// if (ix < nx && iy < ny){
// for(int i = 0; i < nx; i++) {
// for(int j = 0; j < ny; j++) {
// temp += (A[ix * nx + i] * B[ny + iy * j]);
// }
// }
// C[idx] = temp;
// }
}
//Tiled matrix multiplication function
__global__ void multMatrixOnTiles(float *A, float *B, float *C, int nx, int ny) {
//Code from class
unsigned int ix = threadIdx.x + blockIdx.x * TILESize;
unsigned int iy = threadIdx.y + blockIdx.y * TILESize;
//Code from the class demos
// __shared__ int s[256];
// int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float matTempA[TILESize][TILESize];
__shared__ float matTempB[TILESize][TILESize];
//Fill the shared matrices and initialize them with all 0's
for(int i = 0; i < TILESize; i ++) {
for(int j = 0; j < TILESize; j++) {
matTempA[i][j] = 0;
matTempB[i][j] = 0;
}
}
float temp = 0;
//go through all the tiles
for(int i = (TILESize + nx - 1)/TILESize; i >= 0; i--) {
if((i * TILESize + threadIdx.x) < nx && (iy < ny)) {
matTempA[threadIdx.y][threadIdx.x] = A[(iy*ny) + (i*TILESize+threadIdx.x)];
}
if((i * TILESize + threadIdx.y) < ny && (ix < nx)) {
matTempB[threadIdx.y][threadIdx.x] = B[(i*TILESize+threadIdx.y) * nx + ix];
}
__syncthreads(); //We must call __syncthreads after the threads modify the shared matrices
for(int j = 0; j < TILESize; j++) {
temp += matTempA[threadIdx.y][j] * matTempB[j][threadIdx.x];
}
__syncthreads();
}
// if (ix < nx && iy < ny) {
// for(int i = 0; i < ny; i++) {
// C[ix*ny+iy] += A[ix*ny+i] * B[i*ny+iy];
// }
// }
if(ix < nx && iy < ny) {
C[ix*ny+iy] = temp;
}
}
void multMat(float *A, float *B, float *C, const int nx, const int ny) { //Function to multiply matrices (reference example)
for(int i = 0; i < ny; i++) {
for(int j = 0; j < nx; j++) {
for(int k = 0; k < ny; k++) { //row-times-column rule
C[i * nx + j] += (A[i * nx + k] * B[k + nx * j]);
// printf("G"); //Debug
}
}
}
}
//Check the result
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N*N; i++)
{
if (fabs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Matrix multiplications from host and GPU match!.\n\n");
else
printf("Arrays do not match.\n\n");
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
SAFE_CALL(hipGetDeviceProperties(&deviceProp, dev), "Error device prop");
printf("Using Device %d: %s\n", dev, deviceProp.name);
SAFE_CALL(hipSetDevice(dev), "Error setting device");
// set up data size of matrix
int nx = NSize;
int ny = NSize;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", nx, ny);
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// Initialize our data
fillMat(h_A, nxy);
fillMat(h_B, nxy);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add matrix at host side for result SAFE_CALLs
auto start_cpu = chrono::high_resolution_clock::now();
multMat(h_A, h_B, hostRef, nx, ny);
auto end_cpu = chrono::high_resolution_clock::now();
chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("MultMat en Host elapsed %f ms\n\n", duration_ms.count());
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC;
SAFE_CALL(hipMalloc((void **)&d_MatA, nBytes), "Error allocating d_MatA");
SAFE_CALL(hipMalloc((void **)&d_MatB, nBytes), "Error allocating d_MatB");
SAFE_CALL(hipMalloc((void **)&d_MatC, nBytes), "Error allocating d_MatC");
// transfer data from host to device
SAFE_CALL(hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice), "Error copying d_MatA");
SAFE_CALL(hipMemcpy(d_MatB, h_B, nBytes, hipMemcpyHostToDevice), "Error copying d_MatB");
// invoke kernel at host side
int dimx = 32;
int dimy = 32;
dim3 block(dimx, dimy);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
//MULTMAT ON GPU 2D_2D (already existed)
//Multiply the matrices a number of times
int timeAverage = 0;
// add matrix at host side for result SAFE_CALLs
//Taken from the class example
start_cpu = chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( multMatrixOnGPU2D), dim3(grid), dim3(block), 0, 0, d_MatA, d_MatB, d_MatC, nx, ny);
SAFE_CALL(hipDeviceSynchronize(), "Error executing kernel");
end_cpu = chrono::high_resolution_clock::now();
duration_ms = end_cpu - start_cpu;
timeAverage += duration_ms.count();
int performanceTime = timeAverage;
printf("La cantidad de tiempo que se tarda cada ejecucion con GPU con threads es alrededor de: %d ms\n", performanceTime);
printf("Tamano de matriz: %d x %d\n", nx, ny);
// SAFE_CALL kernel error
SAFE_CALL(hipGetLastError(), "Error with last error");
// copy kernel result back to host side
SAFE_CALL(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost), "Error copying d_MatC");
// check device results
checkResult(hostRef, gpuRef, nxy);
//MULTMAT WITH TILING ON GPU
timeAverage = 0;
// add matrix at host side for result SAFE_CALLs
//Lo sacamos del ejemplo de clase
start_cpu = chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( multMatrixOnTiles), dim3(grid), dim3(block), 0, 0, d_MatA, d_MatB, d_MatC, nx, ny);
SAFE_CALL(hipDeviceSynchronize(), "Error executing kernel");
end_cpu = chrono::high_resolution_clock::now();
duration_ms = end_cpu - start_cpu;
timeAverage += duration_ms.count();
performanceTime = timeAverage;
printf("La cantidad de tiempo que se tarda cada ejecucion con TILING de %d x %d es alrededor de: %d ms\n", TILESize, TILESize, performanceTime);
printf("Tamano de matriz: %d x %d\n", nx, ny);
// SAFE_CALL kernel error
SAFE_CALL(hipGetLastError(), "Error with last error");
// copy kernel result back to host side
SAFE_CALL(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost), "Error copying d_MatC");
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
SAFE_CALL(hipFree(d_MatA), "Error freeing memory");
SAFE_CALL(hipFree(d_MatB), "Error freeing memory");
SAFE_CALL(hipFree(d_MatC), "Error freeing memory");
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
// reset device
SAFE_CALL(hipDeviceReset(), "Error reseting");
return (0);
} | 1af3230e55d6f3086bbabd5d0e885fc31b6b979d.cu | //Seung Lee - A01021720
//Matrix multiplication with tiles
//We reuse some of the code from a-1
#include "common.h"
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include <chrono>
#include <iostream>
using namespace std;
//Professor: read the report to see why I left it at 1700 :(
#define NSize 1700 //We define the size of our N x N matrix
#define TILESize 32 //Change TILESize here so it changes throughout the code
void fillMat(float * ip, const int size) { //Function to fill our matrices (done like the in-class matrix_sum_1d example)
for(int i = 0; i < size; i++) {
ip[i] = (rand() / (float)RAND_MAX * 10.0f); //Fill them with random floats between 0 and 10
// ip[i] = i;
}
}
// naive kernel: 2D grid of 2D blocks, one thread per output element
__global__ void multMatrixOnGPU2D(float *A, float *B, float *C, int nx, int ny)
{
//Code from class
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
// unsigned int idx = iy * nx + ix;
if (ix < nx && iy < ny) {
for(int i = 0; i < ny; i++) {
C[ix*ny+iy] += A[ix*ny+i] * B[i*ny+iy];
}
}
// float temp = 0;
// if (ix < nx && iy < ny){
// for(int i = 0; i < nx; i++) {
// for(int j = 0; j < ny; j++) {
// temp += (A[ix * nx + i] * B[ny + iy * j]);
// }
// }
// C[idx] = temp;
// }
}
//Tiled matrix multiplication function
__global__ void multMatrixOnTiles(float *A, float *B, float *C, int nx, int ny) {
//Code from class
unsigned int ix = threadIdx.x + blockIdx.x * TILESize;
unsigned int iy = threadIdx.y + blockIdx.y * TILESize;
//Code from the class demos
// __shared__ int s[256];
// int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float matTempA[TILESize][TILESize];
__shared__ float matTempB[TILESize][TILESize];
//Fill the shared matrices and initialize them with all 0's
for(int i = 0; i < TILESize; i ++) {
for(int j = 0; j < TILESize; j++) {
matTempA[i][j] = 0;
matTempB[i][j] = 0;
}
}
float temp = 0;
//go through all the tiles
for(int i = (TILESize + nx - 1)/TILESize; i >= 0; i--) {
if((i * TILESize + threadIdx.x) < nx && (iy < ny)) {
matTempA[threadIdx.y][threadIdx.x] = A[(iy*ny) + (i*TILESize+threadIdx.x)];
}
if((i * TILESize + threadIdx.y) < ny && (ix < nx)) {
matTempB[threadIdx.y][threadIdx.x] = B[(i*TILESize+threadIdx.y) * nx + ix];
}
__syncthreads(); //We must call __syncthreads after the threads modify the shared matrices
for(int j = 0; j < TILESize; j++) {
temp += matTempA[threadIdx.y][j] * matTempB[j][threadIdx.x];
}
__syncthreads();
}
// if (ix < nx && iy < ny) {
// for(int i = 0; i < ny; i++) {
// C[ix*ny+iy] += A[ix*ny+i] * B[i*ny+iy];
// }
// }
if(ix < nx && iy < ny) {
C[ix*ny+iy] = temp;
}
}
void multMat(float *A, float *B, float *C, const int nx, const int ny) { //Function to multiply matrices (reference example)
for(int i = 0; i < ny; i++) {
for(int j = 0; j < nx; j++) {
for(int k = 0; k < ny; k++) { //row-times-column rule
C[i * nx + j] += (A[i * nx + k] * B[k + nx * j]);
// printf("G"); //Debug
}
}
}
}
//Check the result
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N*N; i++)
{
if (fabs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Matrix multiplications from host and GPU match!.\n\n");
else
printf("Arrays do not match.\n\n");
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
SAFE_CALL(cudaGetDeviceProperties(&deviceProp, dev), "Error device prop");
printf("Using Device %d: %s\n", dev, deviceProp.name);
SAFE_CALL(cudaSetDevice(dev), "Error setting device");
// set up data size of matrix
int nx = NSize;
int ny = NSize;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", nx, ny);
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// Initialize our data
fillMat(h_A, nxy);
fillMat(h_B, nxy);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add matrix at host side for result SAFE_CALLs
auto start_cpu = chrono::high_resolution_clock::now();
multMat(h_A, h_B, hostRef, nx, ny);
auto end_cpu = chrono::high_resolution_clock::now();
chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("MultMat en Host elapsed %f ms\n\n", duration_ms.count());
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC;
SAFE_CALL(cudaMalloc((void **)&d_MatA, nBytes), "Error allocating d_MatA");
SAFE_CALL(cudaMalloc((void **)&d_MatB, nBytes), "Error allocating d_MatB");
SAFE_CALL(cudaMalloc((void **)&d_MatC, nBytes), "Error allocating d_MatC");
// transfer data from host to device
SAFE_CALL(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice), "Error copying d_MatA");
SAFE_CALL(cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice), "Error copying d_MatB");
// invoke kernel at host side
int dimx = 32;
int dimy = 32;
dim3 block(dimx, dimy);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
//MULTMAT ON GPU 2D_2D (already existed)
//Multiply the matrices a number of times
int timeAverage = 0;
// add matrix at host side for result SAFE_CALLs
//Taken from the class example
start_cpu = chrono::high_resolution_clock::now();
multMatrixOnGPU2D<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx, ny);
SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel");
end_cpu = chrono::high_resolution_clock::now();
duration_ms = end_cpu - start_cpu;
timeAverage += duration_ms.count();
int performanceTime = timeAverage;
printf("La cantidad de tiempo que se tarda cada ejecucion con GPU con threads es alrededor de: %d ms\n", performanceTime);
printf("Tamano de matriz: %d x %d\n", nx, ny);
// SAFE_CALL kernel error
SAFE_CALL(cudaGetLastError(), "Error with last error");
// copy kernel result back to host side
SAFE_CALL(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost), "Error copying d_MatC");
// check device results
checkResult(hostRef, gpuRef, nxy);
//MULTMAT WITH TILING ON GPU
timeAverage = 0;
// add matrix at host side for result SAFE_CALLs
//Taken from the class example
start_cpu = chrono::high_resolution_clock::now();
multMatrixOnTiles<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx, ny);
SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel");
end_cpu = chrono::high_resolution_clock::now();
duration_ms = end_cpu - start_cpu;
timeAverage += duration_ms.count();
performanceTime = timeAverage;
printf("La cantidad de tiempo que se tarda cada ejecucion con TILING de %d x %d es alrededor de: %d ms\n", TILESize, TILESize, performanceTime);
printf("Tamano de matriz: %d x %d\n", nx, ny);
// SAFE_CALL kernel error
SAFE_CALL(cudaGetLastError(), "Error with last error");
// copy kernel result back to host side
SAFE_CALL(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost), "Error copying d_MatC");
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
SAFE_CALL(cudaFree(d_MatA), "Error freeing memory");
SAFE_CALL(cudaFree(d_MatB), "Error freeing memory");
SAFE_CALL(cudaFree(d_MatC), "Error freeing memory");
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
// reset device
SAFE_CALL(cudaDeviceReset(), "Error reseting");
return (0);
} |
4b3c6a72940f6079464e88ae2acb0bb53790015e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <transient.hpp>
//#include <trans_cuda.hpp>
#include <map>
#include <thrust/for_each.h>
#include <string>
#include <utility>
//#include <boost/functional/hash.hpp>
//#include <boost/numeric/odeint/integrate/integrate_const.hpp>
//#include <boost/numeric/odeint/external/thrust/thrust.hpp>
//#include <iostream>
//#include <cmath>
//#include <boost/numeric/odeint/stepper/runge_kutta_dopri5.hpp>
//#include <thrust/device_vector.h>
//#include <thrust/iterator/permutation_iterator.h>
//#include <thrust/iterator/counting_iterator.h>
//#include <boost/numeric/odeint/external/thrust/thrust.hpp>
//#include <thrust/for_each.h>
//#include <thrust/device_vector.h>
//#include <thrust/execution_policy.h>
using namespace std;
using namespace boost::numeric::odeint;
namespace transient_analysis {
/*
typedef pair<string, string> strpair;
struct printf_functor
{
//template <string T1, GENERATOR T2>;
__host__ __device__
void operator()(const pair<string, string> mp)
{
// note that using printf in a __device__ function requires
// code compiled for a GPU with compute capability 2.0 or
// higher (nvcc --arch=sm_20)
//printf("%d\n", x);
//printf("%d\n", y);
printf("mp first=%s\n", mp.first);
printf("success in printf_functor\n");
//uint__t bus_id = bus_name_to_id[bus_name] - 1;
}
};
*/
//extern "C" void test_cuda(unordered_map<int, int> generators);
//extern "C" void test_cuda(map<string, GENERATOR> generators);
//void test_cuda(map<string, GENERATOR> &generators);
//extern __global__ void test_cuda();
Transient_Solver::
Transient_Solver(real__t start_time, real__t end_time, real__t time_stepping, string main_path)
: start_time(start_time), end_time(end_time), time_stepping(time_stepping), main_path(main_path) {}
Transient_Solver::~Transient_Solver() {
GraphLU_Destroy(Ybus_matrix);
free(Ybus_matrix);
free(eY);
free(ei);
free(ep);
free(gCurrent);
free(gVoltage);
}
void Transient_Solver::setup_system() {
load_system_data(main_path);
print_system_summary();
get_bus_name_id_mapping();
bus_types[1] = "PQ Bus";
bus_types[2] = "PV Bus";
bus_types[3] = "Slack Bus";
string output_path = "../output";
remove((output_path + "/genBusResults.csv").c_str());
remove((output_path + "/allBusResults.csv").c_str());
nBuses = buses.size();
nGen = generators.size();
nGenUnknowns = VS_output_idx + 1;
n = 2 * nBuses;
nnz = 4 * (line.size() + nBuses);
for (auto &g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
gen_solution [bus_name] = vector<real__t>(nGenUnknowns, 0.);
gen_error [bus_name] = vector<real__t>(nGenUnknowns, 0.);
gen_dq_current[bus_name] = vector<real__t>(2, 0.);
}
bus_voltage = vector<vector<real__t>> (nBuses, vector<real__t>(2, 0.));
// dump_Voltages();
print_gen_parameters();
// print_branch_data();
gCurrent = (real__t*)calloc(n, sizeof(real__t));
gVoltage = (real__t*)calloc(n, sizeof(real__t));
/** allocate memory for arrays used in CSR form */
eY = (real__t*)malloc(sizeof(real__t) * (nnz)); // nonzero values in Ybus matrix
ei = (uint__t*)malloc(sizeof(uint__t) * (nnz)); // column idx of Ybus matrix
ep = (uint__t*)malloc(sizeof(uint__t) * (n + 1)); // initial row pointers
Ybus_matrix = (SGraphLU*)malloc(sizeof(SGraphLU));
gVertex_all = vector<vector<real__t>>(nBuses, vector<real__t>(5, 0.));
current_time = start_time;
num_of_steps = 0;
tol = 1.e-4;
max_step_size = 0.01;
is_modify_Y_bus_matrix = true;
is_fault_treated = false;
}
void Transient_Solver::load_system_data(string folder_path) {
read_bus_data(buses, folder_path + "/system/Node.csv");
read_generator_node_data(buses, generators, folder_path + "/system/Generator.csv");
read_fdpf_data(buses, "../output/fdpf.csv");
// read_load_data(buses, folder_path + "/system/Load.csv");
read_compensator_P_data(buses, folder_path + "/system/Compensator_P.csv");
// read_DC_line_data(buses, line, folder_path + "/system/DC_Line.csv");
read_AC_line_data(buses, line, folder_path + "/system/AC_Line.csv");
read_two_winding_transformer_data(buses, line, folder_path + "/system/Two_winding_transformer.csv");
read_three_winding_transformer_data(buses, line, folder_path + "/system/Three_winding_transformer.csv");
read_EPRI_GEN_data(all_gen, folder_path + "/parameter/Synchronous_Machine.csv");
read_EPRI_GOV_I_data(all_gov_1, folder_path + "/parameter/Governor_1.csv");
read_EPRI_GOV_II_data(all_gov_2, folder_path + "/parameter/Governor_2.csv");
read_EPRI_GOV_III_data(all_gov_3, folder_path + "/parameter/Governor_3.csv");
read_EPRI_GOV_IV_data(all_gov_4, folder_path + "/parameter/Governor_4.csv");
read_EPRI_GOV_V_data(all_gov_5, folder_path + "/parameter/Governor_5.csv");
read_EPRI_GOV_VII_data(all_gov_7, folder_path + "/parameter/Governor_7.csv");
read_EPRI_GOV_VIII_data(all_gov_8, folder_path + "/parameter/Governor_8.csv");
read_EPRI_GOV_IX_data(all_gov_9, folder_path + "/parameter/Governor_9.csv");
read_EPRI_EXC_I_data(all_exc_1, folder_path + "/parameter/AVR_1.csv");
read_EPRI_EXC_II_data(all_exc_2, folder_path + "/parameter/AVR_2.csv");
read_EPRI_EXC_III_TO_X_data(all_exc_3_10, folder_path + "/parameter/AVR_3_to_10.csv");
read_EPRI_EXC_XI_TO_XII_data(all_exc_11_12, folder_path + "/parameter/AVR_11_to_12.csv");
read_EPRI_PSS_I_data(all_pss_1, folder_path + "/parameter/PSS_1.csv");
read_EPRI_PSS_II_data(all_pss_2, folder_path + "/parameter/PSS_2.csv");
read_EPRI_PSS_IV_VI_data(all_pss_4_6, folder_path + "/parameter/PSS_4_6.csv");
read_EPRI_PSS_V_data(all_pss_5, folder_path + "/parameter/PSS_5.csv");
read_EPRI_PSS_VIII_data(all_pss_8, folder_path + "/parameter/PSS_8.csv");
#if DEBUG
cout << "Transient simulation loading data success!\n";
#endif
}
void Transient_Solver::get_bus_name_id_mapping() {
int idx = 1; // id begins with 1
for (auto &b : buses) {
bus_name_to_id[b.first] = idx;
bus_id_to_name[idx] = b.first;
++idx;
}
#if DEBUG
printf("Transient simulation getting bus_name to bus_id mapping success!\n");
#endif
}
void Transient_Solver::dump_Voltages() {
if (current_time == start_time) {
for (int i = 0; i < buses.size(); ++i) {
string bus_name = bus_id_to_name[i];
bus_voltage[i][0] = buses[bus_name].Vm;
bus_voltage[i][1] = buses[bus_name].Va;
}
} else {
for (int i = 0; i < buses.size(); ++i) {
real__t Vx = gVoltage[i];
real__t Vy = gVoltage[i + nBuses];
bus_voltage[i][0] = sqrt(Vx * Vx + Vy * Vy);
bus_voltage[i][1] = atan2(Vy, Vx);
}
}
#if DEBUG
printf("Transient simulation dumping voltages success!\n");
#endif
}
string Transient_Solver::
solve_algebraic_equations_one_step(SGraphLU* matrix, real__t* rhs) {
/* call the graphlu solver to solve the sparse linear system
* notice that GraphLU_Solve_Singular allows the matrix to be *numerically*
* singular */
int ret_solve = GraphLU_Solve_Singular(matrix, rhs, 0);
if (ret_solve < 0) {
printf("Error: solve_algebraic_equations_one_step: %d\n", ret_solve);
return "FAILED";
}
return "SUCCESS";
}
/* a matrix-vector multiplier (not used in this code) */
void Transient_Solver::
matrix_vector_mult(const SGraphLU* matrix, const real__t* src, real__t* dst) {
uint__t n = matrix->n;
real__t* ax = matrix->ax;
uint__t* ai = matrix->ai;
uint__t* ap = matrix->ap;
for (int i = 0; i < n; ++i) {
dst[i] = 0;
for (int k = ap[i]; k < ap[i + 1]; ++k) {
dst[i] += ax[k] * src[ai[k]];
}
}
}
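/** Adaptive step-size control: estimates the local error from gen_error and rescales
 * step_size by 0.8*(tol/err)^(1/4), clamped to [0.1, 4] (or [0.1, 1] when the error
 * exceeds tol) and capped at max_step_size. */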
void Transient_Solver::update_step_size() {
real__t err = 0;
for (auto &g : gen_error) {
for (auto &v : g.second) err += abs(v * v);
}
err = max(sqrt(err), EPS);
printf("current error = %2.12f\n", err);
real__t q = pow(tol / err, 1. / 4.) * 0.8;
if (err < tol)
step_size = min(max(q, 0.1), 4.) * step_size;
else
step_size = min(max(q, 0.1), 1.) * step_size;
step_size = min(max_step_size, step_size);
}
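/** Computes each generator's d- and q-axis stator currents from its sub-transient EMFs
 * (Edpp, Eqpp) and the terminal voltage projected onto the rotor dq frame. */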
void Transient_Solver::get_generator_current() {
for (auto &g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
uint__t bus_id = bus_name_to_id[bus_name] - 1;
uint__t Gen_Par = gen.Gen_Par;
if (Gen_Par == 0) continue;
real__t Vx = gVoltage[bus_id];
real__t Vy = gVoltage[bus_id + nBuses];
real__t delta = gen_solution[bus_name][delta_idx];
real__t Vd = Vx * sin(delta) - Vy * cos(delta);
real__t Vq = Vx * cos(delta) + Vy * sin(delta);
real__t Ra = all_gen[Gen_Par].Ra;
real__t Xdpp = all_gen[Gen_Par].Xdpp;
real__t Xqpp = all_gen[Gen_Par].Xqpp;
real__t Edpp = gen_solution[bus_name][Edpp_idx];
real__t Eqpp = gen_solution[bus_name][Eqpp_idx];
real__t denom = Ra * Ra + Xdpp * Xqpp;
assert(denom > EPS);
gen_dq_current[bus_name][0] = (+Ra * (Edpp - Vd) + Xqpp * (Eqpp - Vq)) / denom;
gen_dq_current[bus_name][1] = (-Xdpp * (Edpp - Vd) + Ra * (Eqpp - Vq)) / denom;
// cout << "dq currents in get_generator_current:" << endl;
// cout << "Id, Iq = " << gen_dq_current[bus_name][0] << ", " << gen_dq_current[bus_name][1] << endl;
#if DEBUG
printf("Transient simulation getting generator current success for bus %s!\n", bus_name.c_str());
#endif
}
}
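/** Models a bus fault by adding a very large shunt term (INF) to the faulted bus'
 * diagonal entry of the edge list while t lies in the fault window, and removes that
 * term again once the fault is cleared. */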
void Transient_Solver::
apply_fault(const uint__t busID, const real__t t, const real__t fault_btime, const real__t fault_etime) {
/* busID is indexed from 1 */
if (t >= fault_btime && t < fault_etime - time_stepping) { // fault happens
if (is_fault_treated)
return;
for (int i = 0; i < gEdge_all.size(); ++i) {
if (gEdge_all[i][0] == busID - 1 && gEdge_all[i][1] == busID - 1) {
gEdge_all[i][2] += INF;
is_fault_treated = true;
break;
}
}
} else if (abs(t - fault_etime) <= EPS) { // fault cleared
is_fault_treated = false;
} else if (t > fault_etime) {
if (is_fault_treated)
return;
for (int i = 0; i < gEdge_all.size(); ++i) {
if (gEdge_all[i][0] == busID - 1 && gEdge_all[i][1] == busID - 1) {
gEdge_all[i][2] -= INF;
is_fault_treated = true;
break;
}
}
}
}
/** this function should be called after the compute_ODE_initial_values */
void Transient_Solver::assign_ode_solver_data(string bus_name, GENERATOR& gen) {
system.parameters.EXC_type = gen.AVR_Model;
switch (gen.AVR_Model) {
case 1:
system.parameters.exc_1 = all_exc_1[gen.AVR_Par]; break;
case 2:
system.parameters.exc_2 = all_exc_2[gen.AVR_Par]; break;
case 3: case 4: case 5: case 6: case 7: case 8: case 9: case 10:
system.parameters.exc_3_10 = all_exc_3_10[gen.AVR_Par]; break;
case 11: case 12:
system.parameters.exc_11_12 = all_exc_11_12[gen.AVR_Par]; break;
default:
break;
}
system.parameters.GOV_type = gen.GOV_Model;
switch (gen.GOV_Model) {
case 1: system.parameters.gov_1 = all_gov_1[gen.GOV_Par]; break;
case 2: system.parameters.gov_2 = all_gov_2[gen.GOV_Par]; break;
case 3: system.parameters.gov_3 = all_gov_3[gen.GOV_Par]; break;
case 4: system.parameters.gov_4 = all_gov_4[gen.GOV_Par]; break;
case 5: system.parameters.gov_5 = all_gov_5[gen.GOV_Par]; break;
case 7: system.parameters.gov_7 = all_gov_7[gen.GOV_Par]; break;
case 8: system.parameters.gov_8 = all_gov_8[gen.GOV_Par]; break;
case 9: system.parameters.gov_9 = all_gov_9[gen.GOV_Par]; break;
}
system.parameters.PSS_type = gen.PSS_Model;
switch (gen.PSS_Model) {
case 1: system.parameters.pss_1 = all_pss_1[gen.PSS_Par]; break;
case 2: system.parameters.pss_2 = all_pss_2[gen.PSS_Par]; break;
case 4: system.parameters.pss_4_6 = all_pss_4_6[gen.PSS_Par]; break;
case 5: system.parameters.pss_5 = all_pss_5[gen.PSS_Par]; break;
case 6: system.parameters.pss_4_6 = all_pss_4_6[gen.PSS_Par]; break;
case 8: system.parameters.pss_8 = all_pss_8[gen.PSS_Par]; break;
}
system.parameters.GEN_type = gen.Gen_Model;
if (gen.Gen_Par == 0) {
all_gen[0].Xdp = gen.Xdp < EPS ? 0.0001 : gen.Xdp;
all_gen[0].Xdpp = gen.Xdpp < EPS ? 0.0001 : gen.Xdpp;
all_gen[0].TJ = gen.TJ < EPS ? 999999.875 : gen.TJ;
all_gen[0].X2 = gen.X2;
all_gen[0].Ra = 0.;
}
system.parameters.gen = all_gen[gen.Gen_Par];
system.parameters.gen.bus_id = bus_name_to_id[bus_name] - 1;
system.parameters.omega_ref = gen.omega_ref;
system.parameters.freq_ref = gen.freq_ref;
system.parameters.Pe_ref = gen.Pe_ref;
system.parameters.Vt_ref = gen.Vt_ref;
system.parameters.Efd0 = gen.Efd0;
system.parameters.mu0 = gen.mu0;
system.parameters.Rate_MW = gen.Rate_MW;
}
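/** Main time-stepping loop: each step rebuilds the Y-bus matrix, solves the network
 * algebraic equations with GraphLU, and advances all generator ODEs together with one
 * Dormand-Prince 5 step (odeint + Thrust) on the device. A fault is applied at a
 * hard-coded bus between t = 2 and t = 2.2. */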
void Transient_Solver::run(int argc, char** argv) {
/** initialize simulation settings */
setup_system();
compute_ODE_initial_values();
runge_kutta_dopri5<d_vector_type, value_type , d_vector_type , value_type> dopri5_stepper_type;
print_bus_data();
/** convert the data on the edges, notice that the conversion is
once for all, unless the topology of the network changes */
generate_edges_matrix();
const value_type dt = 0.1;
//d2_vector_type d2_gen_solution_set;
h2_vector_type h2_gen_solution_set;
h2_vector_type h2_gen_error_set;
//double h2_gen_solution_set[8][35];
while (current_time <= end_time) {
/* output every 10 steps - modify if necessary */
if ((num_of_steps % 5 == 0 && current_time > 0) || num_of_steps == 1) {
output_data_to_csv_one_step();
}
/* make sure that current_time + dt does not exceed end_time */
time_stepping = min(time_stepping, end_time - current_time);
if (time_stepping == 0) break;
/** add a fault to a LOAD bus */
int fault_bus_id = 28;
assert(fault_bus_id <= nBuses);
apply_fault(fault_bus_id, current_time, 2., 2.2);
/** update the buses info, then generate the new Y bus matrix */
convert_nodes();
convert_to_CSR_arrays();
generate_Y_Bus_matrix();
/** As graphlu returns the solution in-place, we copy gCurrent to gVoltage */
memcpy(gVoltage, gCurrent, sizeof(*gCurrent) * n);
/** solve one step of algebraic equations */
string result = solve_algebraic_equations_one_step(Ybus_matrix, gVoltage);
if (result == "FAILED") {
std::cerr << "Solving algebraic equations FAILED, please check!!!\n";
std::terminate();
}
int gen_length = 35;
int gen_count = 0;
int gen_attr_count = 0;
d_vector_type d2_gen_solution_set(GEN_SIZE*gen_length);
d_vector_type d2_gen_error_set(GEN_SIZE*gen_length);
std::clock_t start_forloop = std::clock();
for (auto& g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
uint__t bus_id = bus_name_to_id[bus_name] - 1;
/** update the Vx and Vy values at each gen node */
system.Vx[gen_count] = gVoltage[bus_id];
system.Vy[gen_count] = gVoltage[bus_id + nBuses];
system.Id[gen_count] = gen_dq_current[bus_name][0];
system.Iq[gen_count] = gen_dq_current[bus_name][1];
assign_ode_solver_data(bus_name, gen);
gen_length = gen_solution[bus_name].size();
d_vector_type d_gen_solution_set = gen_solution[bus_name];
d_vector_type d_gen_error_set = gen_error[bus_name];
//h2_gen_solution_set[gen_count][gen_attr_count]
h2_gen_solution_set.push_back(gen_solution[bus_name]);
h2_gen_error_set.push_back(gen_error[bus_name]);
thrust::copy(h2_gen_solution_set[gen_count].begin(), h2_gen_solution_set[gen_count].end(), &d2_gen_solution_set[gen_count*gen_length]);
thrust::copy(h2_gen_error_set[gen_count].begin(), h2_gen_error_set[gen_count].end(), &d2_gen_error_set[gen_count*gen_length]);
gen_count++;
}
//int gen_length = gen_solution[bus_name].size();
//d_vector_type d2_gen_solution_set(GEN_SIZE*gen_length);
//d_vector_type d2_gen_solution_set(h2_gen_solution_set.begin(), h2_gen_solution_set.end());
//thrust::copy(&(h2_gen_solution_set[0][0]), &(h2_gen_solution_set[7][gen_length-1]), d2_gen_solution_set.begin());
//for(int i=0; i < GEN_SIZE; i++){
// thrust::copy(h2_gen_solution_set[i].begin(), h2_gen_solution_set[i].end(), &d2_gen_solution_set[i*gen_length]);
// thrust::copy(h2_gen_error_set[i].begin(), h2_gen_error_set[i].end(), &d2_gen_error_set[i*gen_length]);
//}
thrust::sequence(d2_gen_solution_set.begin(), d2_gen_solution_set.end());
//d_vector_type d2_gen_error_set(GEN_SIZE*gen_length);
//thrust::copy(&(h2_gen_error_set[0][0]), &(h2_gen_error_set[7][gen_length-1]), d2_gen_error_set.begin());
//d_vector_type d2_gen_error_set(h2_gen_error_set.begin(), h2_gen_error_set.end());
//for(int i =0; i<GEN_SIZE; i++){
// thrust::copy(h2_gen_error_set[i].begin(), h2_gen_error_set[i].end(), &d2_gen_error_set[i*gen_length]);
// }
thrust::sequence(d2_gen_error_set.begin(), d2_gen_error_set.end());
//d_vector_type d2_gen_solution_set = h2_gen_solution_set;
//d_vector_type d2_gen_error_set = h2_gen_error_set;
std::clock_t start = std::clock();
dopri5_stepper_type.do_step(system, d2_gen_solution_set, current_time,
time_stepping, d2_gen_error_set);
printf("+++After 8 Gen computing: %.4f seconds\n\n", (std::clock() - start) / (real__t)CLOCKS_PER_SEC);
printf("+++After 8 Gen computing including forloop: %.4f seconds\n\n", (std::clock() - start_forloop) / (real__t)CLOCKS_PER_SEC);
int gen_ith = 0;
for (auto& g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
//d2_gen_solution_set.push_back(gen_solution[bus_name]);
//integrate_const( dopri5_stepper_type , system , d_gen_solution_set , 0.0 , 1.0 , dt );
////dopri5_stepper_type.do_step(system, d_gen_solution_set, current_time,
//// time_stepping, d_gen_error_set);
//dopri5_stepper_type.do_step(system, gen_solution[bus_name], current_time,
// time_stepping, gen_error[bus_name]);
//integrate_adaptive( make_controlled( 1.0e-6 , 1.0e-6 , dopri5_stepper_type() ) , system , x , t , t + 1.0 , 0.1 );
//cout << "End of dp_step===========================" << endl;
/** post-process: prepare for outputs */
gen_solution[bus_name][mu_output_idx] = system.get_mu(gen_ith);
gen_solution[bus_name][PT_output_idx] = system.get_Pmech(gen_ith);
gen_solution[bus_name][Efd_output_idx] = system.get_Efd(gen_ith);
gen_solution[bus_name][VS_output_idx] = system.get_VS(gen_ith);
gen_ith++;
/* update the time and number of steps */
//current_time += time_stepping;
//num_of_steps++;
}
//cout << "\n+++Now number of steps: " << num_of_steps << endl;
#if DEBUG
if (num_of_steps % 1 == 0) {
// print_matrix<real__t>(gEdge_all, "System Matrix");
// print_array<real__t>(gCurrent, buses.size(), "RHS: ");
// print_array<real__t>(gVoltage, buses.size(), "Solution: ");
print_bus_data();
print_gen_solution();
}
#endif
/* update the time and number of steps */
current_time += time_stepping;
num_of_steps++;
}
printf("\n\nTransient Simulation Result:\n");
print_bus_data();
print_gen_solution();
/* Done with the simulation! Congratulations! */
cout << "\n+++Total number of steps: " << num_of_steps << endl;
}
} // namespace transient_analysis
| 4b3c6a72940f6079464e88ae2acb0bb53790015e.cu | #include <transient.hpp>
//#include <trans_cuda.hpp>
#include <map>
#include <thrust/for_each.h>
#include <string>
#include <utility>
//#include <boost/functional/hash.hpp>
//#include <boost/numeric/odeint/integrate/integrate_const.hpp>
//#include <boost/numeric/odeint/external/thrust/thrust.hpp>
//#include <iostream>
//#include <cmath>
//#include <boost/numeric/odeint/stepper/runge_kutta_dopri5.hpp>
//#include <thrust/device_vector.h>
//#include <thrust/iterator/permutation_iterator.h>
//#include <thrust/iterator/counting_iterator.h>
//#include <boost/numeric/odeint/external/thrust/thrust.hpp>
//#include <thrust/for_each.h>
//#include <thrust/device_vector.h>
//#include <thrust/execution_policy.h>
using namespace std;
using namespace boost::numeric::odeint;
namespace transient_analysis {
/*
typedef pair<string, string> strpair;
struct printf_functor
{
//template <string T1, GENERATOR T2>;
__host__ __device__
void operator()(const pair<string, string> mp)
{
// note that using printf in a __device__ function requires
// code compiled for a GPU with compute capability 2.0 or
// higher (nvcc --arch=sm_20)
//printf("%d\n", x);
//printf("%d\n", y);
printf("mp first=%s\n", mp.first);
printf("success in printf_functor\n");
//uint__t bus_id = bus_name_to_id[bus_name] - 1;
}
};
*/
//extern "C" void test_cuda(unordered_map<int, int> generators);
//extern "C" void test_cuda(map<string, GENERATOR> generators);
//void test_cuda(map<string, GENERATOR> &generators);
//extern __global__ void test_cuda();
Transient_Solver::
Transient_Solver(real__t start_time, real__t end_time, real__t time_stepping, string main_path)
: start_time(start_time), end_time(end_time), time_stepping(time_stepping), main_path(main_path) {}
Transient_Solver::~Transient_Solver() {
GraphLU_Destroy(Ybus_matrix);
free(Ybus_matrix);
free(eY);
free(ei);
free(ep);
free(gCurrent);
free(gVoltage);
}
void Transient_Solver::setup_system() {
load_system_data(main_path);
print_system_summary();
get_bus_name_id_mapping();
bus_types[1] = "PQ Bus";
bus_types[2] = "PV Bus";
bus_types[3] = "Slack Bus";
string output_path = "../output";
remove((output_path + "/genBusResults.csv").c_str());
remove((output_path + "/allBusResults.csv").c_str());
nBuses = buses.size();
nGen = generators.size();
nGenUnknowns = VS_output_idx + 1;
n = 2 * nBuses;
nnz = 4 * (line.size() + nBuses);
for (auto &g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
gen_solution [bus_name] = vector<real__t>(nGenUnknowns, 0.);
gen_error [bus_name] = vector<real__t>(nGenUnknowns, 0.);
gen_dq_current[bus_name] = vector<real__t>(2, 0.);
}
bus_voltage = vector<vector<real__t>> (nBuses, vector<real__t>(2, 0.));
// dump_Voltages();
print_gen_parameters();
// print_branch_data();
gCurrent = (real__t*)calloc(n, sizeof(real__t));
gVoltage = (real__t*)calloc(n, sizeof(real__t));
/** allocate memory for arrays used in CSR form */
eY = (real__t*)malloc(sizeof(real__t) * (nnz)); // nonzero values in Ybus matrix
ei = (uint__t*)malloc(sizeof(uint__t) * (nnz)); // column idx of Ybus matrix
ep = (uint__t*)malloc(sizeof(uint__t) * (n + 1)); // initial row pointers
Ybus_matrix = (SGraphLU*)malloc(sizeof(SGraphLU));
gVertex_all = vector<vector<real__t>>(nBuses, vector<real__t>(5, 0.));
current_time = start_time;
num_of_steps = 0;
tol = 1.e-4;
max_step_size = 0.01;
is_modify_Y_bus_matrix = true;
is_fault_treated = false;
}
void Transient_Solver::load_system_data(string folder_path) {
read_bus_data(buses, folder_path + "/system/Node.csv");
read_generator_node_data(buses, generators, folder_path + "/system/Generator.csv");
read_fdpf_data(buses, "../output/fdpf.csv");
// read_load_data(buses, folder_path + "/system/Load.csv");
read_compensator_P_data(buses, folder_path + "/system/Compensator_P.csv");
// read_DC_line_data(buses, line, folder_path + "/system/DC_Line.csv");
read_AC_line_data(buses, line, folder_path + "/system/AC_Line.csv");
read_two_winding_transformer_data(buses, line, folder_path + "/system/Two_winding_transformer.csv");
read_three_winding_transformer_data(buses, line, folder_path + "/system/Three_winding_transformer.csv");
read_EPRI_GEN_data(all_gen, folder_path + "/parameter/Synchronous_Machine.csv");
read_EPRI_GOV_I_data(all_gov_1, folder_path + "/parameter/Governor_1.csv");
read_EPRI_GOV_II_data(all_gov_2, folder_path + "/parameter/Governor_2.csv");
read_EPRI_GOV_III_data(all_gov_3, folder_path + "/parameter/Governor_3.csv");
read_EPRI_GOV_IV_data(all_gov_4, folder_path + "/parameter/Governor_4.csv");
read_EPRI_GOV_V_data(all_gov_5, folder_path + "/parameter/Governor_5.csv");
read_EPRI_GOV_VII_data(all_gov_7, folder_path + "/parameter/Governor_7.csv");
read_EPRI_GOV_VIII_data(all_gov_8, folder_path + "/parameter/Governor_8.csv");
read_EPRI_GOV_IX_data(all_gov_9, folder_path + "/parameter/Governor_9.csv");
read_EPRI_EXC_I_data(all_exc_1, folder_path + "/parameter/AVR_1.csv");
read_EPRI_EXC_II_data(all_exc_2, folder_path + "/parameter/AVR_2.csv");
read_EPRI_EXC_III_TO_X_data(all_exc_3_10, folder_path + "/parameter/AVR_3_to_10.csv");
read_EPRI_EXC_XI_TO_XII_data(all_exc_11_12, folder_path + "/parameter/AVR_11_to_12.csv");
read_EPRI_PSS_I_data(all_pss_1, folder_path + "/parameter/PSS_1.csv");
read_EPRI_PSS_II_data(all_pss_2, folder_path + "/parameter/PSS_2.csv");
read_EPRI_PSS_IV_VI_data(all_pss_4_6, folder_path + "/parameter/PSS_4_6.csv");
read_EPRI_PSS_V_data(all_pss_5, folder_path + "/parameter/PSS_5.csv");
read_EPRI_PSS_VIII_data(all_pss_8, folder_path + "/parameter/PSS_8.csv");
#if DEBUG
cout << "Transient simulation loading data success!\n";
#endif
}
void Transient_Solver::get_bus_name_id_mapping() {
int idx = 1; // id begins with 1
for (auto &b : buses) {
bus_name_to_id[b.first] = idx;
bus_id_to_name[idx] = b.first;
++idx;
}
#if DEBUG
printf("Transient simulation getting bus_name to bus_id mapping success!\n");
#endif
}
void Transient_Solver::dump_Voltages() {
if (current_time == start_time) {
for (int i = 0; i < buses.size(); ++i) {
string bus_name = bus_id_to_name[i];
bus_voltage[i][0] = buses[bus_name].Vm;
bus_voltage[i][1] = buses[bus_name].Va;
}
} else {
for (int i = 0; i < buses.size(); ++i) {
real__t Vx = gVoltage[i];
real__t Vy = gVoltage[i + nBuses];
bus_voltage[i][0] = sqrt(Vx * Vx + Vy * Vy);
bus_voltage[i][1] = atan2(Vy, Vx);
}
}
#if DEBUG
printf("Transient simulation dumping voltages success!\n");
#endif
}
string Transient_Solver::
solve_algebraic_equations_one_step(SGraphLU* matrix, real__t* rhs) {
/* call the graphlu solver to solve the sparse linear system
* notice that GraphLU_Solve_Singular allows the matrix to be *numerically*
* singular */
int ret_solve = GraphLU_Solve_Singular(matrix, rhs, 0);
if (ret_solve < 0) {
printf("Error: solve_algebraic_equations_one_step: %d\n", ret_solve);
return "FAILED";
}
return "SUCCESS";
}
/* a matrix-vector multiplier (not used in this code) */
void Transient_Solver::
matrix_vector_mult(const SGraphLU* matrix, const real__t* src, real__t* dst) {
uint__t n = matrix->n;
real__t* ax = matrix->ax;
uint__t* ai = matrix->ai;
uint__t* ap = matrix->ap;
for (int i = 0; i < n; ++i) {
dst[i] = 0;
for (int k = ap[i]; k < ap[i + 1]; ++k) {
dst[i] += ax[k] * src[ai[k]];
}
}
}
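/** Adaptive step-size control: estimates the local error from gen_error and rescales
 * step_size by 0.8*(tol/err)^(1/4), clamped to [0.1, 4] (or [0.1, 1] when the error
 * exceeds tol) and capped at max_step_size. */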
void Transient_Solver::update_step_size() {
real__t err = 0;
for (auto &g : gen_error) {
for (auto &v : g.second) err += abs(v * v);
}
err = max(sqrt(err), EPS);
printf("current error = %2.12f\n", err);
real__t q = pow(tol / err, 1. / 4.) * 0.8;
if (err < tol)
step_size = min(max(q, 0.1), 4.) * step_size;
else
step_size = min(max(q, 0.1), 1.) * step_size;
step_size = min(max_step_size, step_size);
}
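/** Computes each generator's d- and q-axis stator currents from its sub-transient EMFs
 * (Edpp, Eqpp) and the terminal voltage projected onto the rotor dq frame. */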
void Transient_Solver::get_generator_current() {
for (auto &g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
uint__t bus_id = bus_name_to_id[bus_name] - 1;
uint__t Gen_Par = gen.Gen_Par;
if (Gen_Par == 0) continue;
real__t Vx = gVoltage[bus_id];
real__t Vy = gVoltage[bus_id + nBuses];
real__t delta = gen_solution[bus_name][delta_idx];
real__t Vd = Vx * sin(delta) - Vy * cos(delta);
real__t Vq = Vx * cos(delta) + Vy * sin(delta);
real__t Ra = all_gen[Gen_Par].Ra;
real__t Xdpp = all_gen[Gen_Par].Xdpp;
real__t Xqpp = all_gen[Gen_Par].Xqpp;
real__t Edpp = gen_solution[bus_name][Edpp_idx];
real__t Eqpp = gen_solution[bus_name][Eqpp_idx];
real__t denom = Ra * Ra + Xdpp * Xqpp;
assert(denom > EPS);
gen_dq_current[bus_name][0] = (+Ra * (Edpp - Vd) + Xqpp * (Eqpp - Vq)) / denom;
gen_dq_current[bus_name][1] = (-Xdpp * (Edpp - Vd) + Ra * (Eqpp - Vq)) / denom;
// cout << "dq currents in get_generator_current:" << endl;
// cout << "Id, Iq = " << gen_dq_current[bus_name][0] << ", " << gen_dq_current[bus_name][1] << endl;
#if DEBUG
printf("Transient simulation getting generator current success for bus %s!\n", bus_name.c_str());
#endif
}
}
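/** Models a bus fault by adding a very large shunt term (INF) to the faulted bus'
 * diagonal entry of the edge list while t lies in the fault window, and removes that
 * term again once the fault is cleared. */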
void Transient_Solver::
apply_fault(const uint__t busID, const real__t t, const real__t fault_btime, const real__t fault_etime) {
/* busID is indexed from 1 */
if (t >= fault_btime && t < fault_etime - time_stepping) { // fault happens
if (is_fault_treated)
return;
for (int i = 0; i < gEdge_all.size(); ++i) {
if (gEdge_all[i][0] == busID - 1 && gEdge_all[i][1] == busID - 1) {
gEdge_all[i][2] += INF;
is_fault_treated = true;
break;
}
}
} else if (abs(t - fault_etime) <= EPS) { // fault cleared
is_fault_treated = false;
} else if (t > fault_etime) {
if (is_fault_treated)
return;
for (int i = 0; i < gEdge_all.size(); ++i) {
if (gEdge_all[i][0] == busID - 1 && gEdge_all[i][1] == busID - 1) {
gEdge_all[i][2] -= INF;
is_fault_treated = true;
break;
}
}
}
}
/** this function should be called after the compute_ODE_initial_values */
void Transient_Solver::assign_ode_solver_data(string bus_name, GENERATOR& gen) {
system.parameters.EXC_type = gen.AVR_Model;
switch (gen.AVR_Model) {
case 1:
system.parameters.exc_1 = all_exc_1[gen.AVR_Par]; break;
case 2:
system.parameters.exc_2 = all_exc_2[gen.AVR_Par]; break;
case 3: case 4: case 5: case 6: case 7: case 8: case 9: case 10:
system.parameters.exc_3_10 = all_exc_3_10[gen.AVR_Par]; break;
case 11: case 12:
system.parameters.exc_11_12 = all_exc_11_12[gen.AVR_Par]; break;
default:
break;
}
system.parameters.GOV_type = gen.GOV_Model;
switch (gen.GOV_Model) {
case 1: system.parameters.gov_1 = all_gov_1[gen.GOV_Par]; break;
case 2: system.parameters.gov_2 = all_gov_2[gen.GOV_Par]; break;
case 3: system.parameters.gov_3 = all_gov_3[gen.GOV_Par]; break;
case 4: system.parameters.gov_4 = all_gov_4[gen.GOV_Par]; break;
case 5: system.parameters.gov_5 = all_gov_5[gen.GOV_Par]; break;
case 7: system.parameters.gov_7 = all_gov_7[gen.GOV_Par]; break;
case 8: system.parameters.gov_8 = all_gov_8[gen.GOV_Par]; break;
case 9: system.parameters.gov_9 = all_gov_9[gen.GOV_Par]; break;
}
system.parameters.PSS_type = gen.PSS_Model;
switch (gen.PSS_Model) {
case 1: system.parameters.pss_1 = all_pss_1[gen.PSS_Par]; break;
case 2: system.parameters.pss_2 = all_pss_2[gen.PSS_Par]; break;
case 4: system.parameters.pss_4_6 = all_pss_4_6[gen.PSS_Par]; break;
case 5: system.parameters.pss_5 = all_pss_5[gen.PSS_Par]; break;
case 6: system.parameters.pss_4_6 = all_pss_4_6[gen.PSS_Par]; break;
case 8: system.parameters.pss_8 = all_pss_8[gen.PSS_Par]; break;
}
system.parameters.GEN_type = gen.Gen_Model;
if (gen.Gen_Par == 0) {
all_gen[0].Xdp = gen.Xdp < EPS ? 0.0001 : gen.Xdp;
all_gen[0].Xdpp = gen.Xdpp < EPS ? 0.0001 : gen.Xdpp;
all_gen[0].TJ = gen.TJ < EPS ? 999999.875 : gen.TJ;
all_gen[0].X2 = gen.X2;
all_gen[0].Ra = 0.;
}
system.parameters.gen = all_gen[gen.Gen_Par];
system.parameters.gen.bus_id = bus_name_to_id[bus_name] - 1;
system.parameters.omega_ref = gen.omega_ref;
system.parameters.freq_ref = gen.freq_ref;
system.parameters.Pe_ref = gen.Pe_ref;
system.parameters.Vt_ref = gen.Vt_ref;
system.parameters.Efd0 = gen.Efd0;
system.parameters.mu0 = gen.mu0;
system.parameters.Rate_MW = gen.Rate_MW;
}
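/** Main time-stepping loop: each step rebuilds the Y-bus matrix, solves the network
 * algebraic equations with GraphLU, and advances all generator ODEs together with one
 * Dormand-Prince 5 step (odeint + Thrust) on the device. A fault is applied at a
 * hard-coded bus between t = 2 and t = 2.2. */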
void Transient_Solver::run(int argc, char** argv) {
/** initialize simulation settings */
setup_system();
compute_ODE_initial_values();
runge_kutta_dopri5<d_vector_type, value_type , d_vector_type , value_type> dopri5_stepper_type;
print_bus_data();
/** convert the data on the edges, notice that the conversion is
once for all, unless the topology of the network changes */
generate_edges_matrix();
const value_type dt = 0.1;
//d2_vector_type d2_gen_solution_set;
h2_vector_type h2_gen_solution_set;
h2_vector_type h2_gen_error_set;
//double h2_gen_solution_set[8][35];
while (current_time <= end_time) {
/* output every 10 steps - modify if necessary */
if ((num_of_steps % 5 == 0 && current_time > 0) || num_of_steps == 1) {
output_data_to_csv_one_step();
}
/* make sure that current_time + dt does not exceed end_time */
time_stepping = min(time_stepping, end_time - current_time);
if (time_stepping == 0) break;
/** add a fault to a LOAD bus */
int fault_bus_id = 28;
assert(fault_bus_id <= nBuses);
apply_fault(fault_bus_id, current_time, 2., 2.2);
/** update the buses info, then generate the new Y bus matrix */
convert_nodes();
convert_to_CSR_arrays();
generate_Y_Bus_matrix();
/** As graphlu returns the solution in-place, we copy gCurrent to gVoltage */
memcpy(gVoltage, gCurrent, sizeof(*gCurrent) * n);
/** solve one step of algebraic equations */
string result = solve_algebraic_equations_one_step(Ybus_matrix, gVoltage);
if (result == "FAILED") {
std::cerr << "Solving algebraic equations FAILED, please check!!!\n";
std::terminate();
}
int gen_length = 35;
int gen_count = 0;
int gen_attr_count = 0;
d_vector_type d2_gen_solution_set(GEN_SIZE*gen_length);
d_vector_type d2_gen_error_set(GEN_SIZE*gen_length);
std::clock_t start_forloop = std::clock();
for (auto& g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
uint__t bus_id = bus_name_to_id[bus_name] - 1;
/** update the Vx and Vy values at each gen node */
system.Vx[gen_count] = gVoltage[bus_id];
system.Vy[gen_count] = gVoltage[bus_id + nBuses];
system.Id[gen_count] = gen_dq_current[bus_name][0];
system.Iq[gen_count] = gen_dq_current[bus_name][1];
assign_ode_solver_data(bus_name, gen);
gen_length = gen_solution[bus_name].size();
d_vector_type d_gen_solution_set = gen_solution[bus_name];
d_vector_type d_gen_error_set = gen_error[bus_name];
//h2_gen_solution_set[gen_count][gen_attr_count]
h2_gen_solution_set.push_back(gen_solution[bus_name]);
h2_gen_error_set.push_back(gen_error[bus_name]);
thrust::copy(h2_gen_solution_set[gen_count].begin(), h2_gen_solution_set[gen_count].end(), &d2_gen_solution_set[gen_count*gen_length]);
thrust::copy(h2_gen_error_set[gen_count].begin(), h2_gen_error_set[gen_count].end(), &d2_gen_error_set[gen_count*gen_length]);
gen_count++;
}
//int gen_length = gen_solution[bus_name].size();
//d_vector_type d2_gen_solution_set(GEN_SIZE*gen_length);
//d_vector_type d2_gen_solution_set(h2_gen_solution_set.begin(), h2_gen_solution_set.end());
//thrust::copy(&(h2_gen_solution_set[0][0]), &(h2_gen_solution_set[7][gen_length-1]), d2_gen_solution_set.begin());
//for(int i=0; i < GEN_SIZE; i++){
// thrust::copy(h2_gen_solution_set[i].begin(), h2_gen_solution_set[i].end(), &d2_gen_solution_set[i*gen_length]);
// thrust::copy(h2_gen_error_set[i].begin(), h2_gen_error_set[i].end(), &d2_gen_error_set[i*gen_length]);
//}
thrust::sequence(d2_gen_solution_set.begin(), d2_gen_solution_set.end());
//d_vector_type d2_gen_error_set(GEN_SIZE*gen_length);
//thrust::copy(&(h2_gen_error_set[0][0]), &(h2_gen_error_set[7][gen_length-1]), d2_gen_error_set.begin());
//d_vector_type d2_gen_error_set(h2_gen_error_set.begin(), h2_gen_error_set.end());
//for(int i =0; i<GEN_SIZE; i++){
// thrust::copy(h2_gen_error_set[i].begin(), h2_gen_error_set[i].end(), &d2_gen_error_set[i*gen_length]);
// }
thrust::sequence(d2_gen_error_set.begin(), d2_gen_error_set.end());
//d_vector_type d2_gen_solution_set = h2_gen_solution_set;
//d_vector_type d2_gen_error_set = h2_gen_error_set;
std::clock_t start = std::clock();
dopri5_stepper_type.do_step(system, d2_gen_solution_set, current_time,
time_stepping, d2_gen_error_set);
printf("+++After 8 Gen computing: %.4f seconds\n\n", (std::clock() - start) / (real__t)CLOCKS_PER_SEC);
printf("+++After 8 Gen computing including forloop: %.4f seconds\n\n", (std::clock() - start_forloop) / (real__t)CLOCKS_PER_SEC);
int gen_ith = 0;
for (auto& g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
//d2_gen_solution_set.push_back(gen_solution[bus_name]);
//integrate_const( dopri5_stepper_type , system , d_gen_solution_set , 0.0 , 1.0 , dt );
////dopri5_stepper_type.do_step(system, d_gen_solution_set, current_time,
//// time_stepping, d_gen_error_set);
//dopri5_stepper_type.do_step(system, gen_solution[bus_name], current_time,
// time_stepping, gen_error[bus_name]);
//integrate_adaptive( make_controlled( 1.0e-6 , 1.0e-6 , dopri5_stepper_type() ) , system , x , t , t + 1.0 , 0.1 );
//cout << "End of dp_step===========================" << endl;
/** post-process: prepare for outputs */
gen_solution[bus_name][mu_output_idx] = system.get_mu(gen_ith);
gen_solution[bus_name][PT_output_idx] = system.get_Pmech(gen_ith);
gen_solution[bus_name][Efd_output_idx] = system.get_Efd(gen_ith);
gen_solution[bus_name][VS_output_idx] = system.get_VS(gen_ith);
gen_ith++;
/* update the time and number of steps */
//current_time += time_stepping;
//num_of_steps++;
}
//cout << "\n+++Now number of steps: " << num_of_steps << endl;
#if DEBUG
if (num_of_steps % 1 == 0) {
// print_matrix<real__t>(gEdge_all, "System Matrix");
// print_array<real__t>(gCurrent, buses.size(), "RHS: ");
// print_array<real__t>(gVoltage, buses.size(), "Solution: ");
print_bus_data();
print_gen_solution();
}
#endif
/* update the time and number of steps */
current_time += time_stepping;
num_of_steps++;
}
printf("\n\nTransient Simulation Result:\n");
print_bus_data();
print_gen_solution();
/* Done with the simulation! Congratulations! */
cout << "\n+++Total number of steps: " << num_of_steps << endl;
}
} // namespace transient_analysis
|
ff8fa4bd54683797021daf062307003469193e50.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
using namespace std;
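/*
 * Order-k star-stencil sum over a 3D X*Y*Z volume: each thread owns one (x,y) column and
 * sweeps along z, writing for every cell the sum of the cell and its k/2 neighbours in
 * +/-x, +/-y and +/-z. The current XY slice is staged in dynamic shared memory ("fatia")
 * with a k/2-wide halo, while "Zdata" holds a per-thread sliding window of the k+1 values
 * needed along z. Neighbours that fall outside the domain are mirrored back inside.
 */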
__global__ void _3Dstencil_sharedMemory(float *d_e,float *d_r,int X,int Y,int Z,int k){
//int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
//printf("sou id %d || threadIdx.x %d || blockIdx.x %d || blockDim.x %d \n",thread_id,threadIdx.x ,blockIdx.x,blockDim.x);
//int thread_id = threadIdx.x + threadIdx.y*blockDim.x + (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
//printf("sou id %d || threadIdx.x %d || blockIdx.x %d|| blockIdx.y %d || blockDim.x %d|| blockDim.y %d \n",thread_id,threadIdx.x ,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
int x,y;
x = threadIdx.x + (blockIdx.x*blockDim.x);
y = threadIdx.y + (blockIdx.y*blockDim.y);
extern __shared__ float fatia[];
//printf("X = %d || Y = %d\n",x,y);
float *Zdata = new float[k+1];
int z=0;
Zdata[k/2] = d_e[x + ( y * (X) ) + ( z* (X*Y) )];
for(int lk =1;lk<(k/2)+1;lk++)
{
int h_e_i;
if(z+lk >= Z)
h_e_i = (x) + ( (y) * (X) ) + ( (z-lk) * (X*Y) );
else
h_e_i = (x) + ( (y) * (X) ) + ( (z+lk) * (X*Y) );
Zdata[k+1-lk] = d_e[h_e_i];
if(z-lk < 0)
h_e_i = (x) + ( (y) * (X) ) + ( (z+lk) * (X*Y) );
else
h_e_i = (x) + ( (y) * (X) ) + ( (z-lk) * (X*Y) );
Zdata[lk-1] = d_e[h_e_i];
}
for(int z=0; z<Z; z++)
{
int h_r_i = x + ( y * (X) ) + ( z* (X*Y) );
fatia[(k/2+threadIdx.x) + (k/2+threadIdx.y)*(blockDim.x+k)] = d_e[h_r_i];
//fatia[threadIdx.x + threadIdx.y*blockDim.x] = d_e[h_r_i];
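// Border threads of the block also stage the k/2-wide halo cells in y and x below;
// out-of-range neighbours are mirrored back into the domain.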
if(threadIdx.y==0)
for(int lk =1;lk<(k/2)+1;lk++)
{
int h_e_i;
if(y-lk < 0)
h_e_i = (x) + ( (y+lk) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x) + ( (y-lk) * (X) ) + ( (z) * (X*Y) );
fatia[(k/2+threadIdx.x) + ((threadIdx.y+(k/2+1)-lk-1)*(blockDim.x+k))] = d_e[h_e_i];
}
if(threadIdx.y==blockDim.y-1)
for(int lk =1;lk<(k/2)+1;lk++)
{
int h_e_i;
if(y+lk >= Y)
h_e_i = (x) + ( (y-lk) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x) + ( (y+lk) * (X) ) + ( (z) * (X*Y) );
fatia[(k/2+threadIdx.x) + ((threadIdx.y+lk+k/2)*(blockDim.x+k))] = d_e[h_e_i];
}
if(threadIdx.x==0)
for(int lk =1;lk<(k/2)+1;lk++)
{
int h_e_i;
if(x-lk < 0)
h_e_i = (x+lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x-lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
fatia[((k/2+1)-lk+threadIdx.x-1) + ((threadIdx.y+k/2)*(blockDim.x+k))] = d_e[h_e_i];
}
if(threadIdx.x==blockDim.x-1)
for(int lk =1;lk<(k/2)+1;lk++)
{
int h_e_i;
if(x+lk >= X)
h_e_i = (x-lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x+lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
fatia[(lk+threadIdx.x+k/2) + ((threadIdx.y+k/2)*(blockDim.x+k))] = d_e[h_e_i];
}
__syncthreads();
// if(blockIdx.x==0 && threadIdx.x==2 && threadIdx.y==2 && z==3)
// {
// printf("\nFATIA\n");
// for(int j=0;j<blockDim.y+k;j++)
// {
// for(int i=0;i<blockDim.x+k;i++)
// {
// printf(" %f",fatia[i+j*(blockDim.x+k)]);
// }
// printf("\n");
// }
// }
int h_e_i = h_r_i;
float temp = Zdata[k/2];
for(int lk =1;lk<(k/2)+1;lk++)
{
h_e_i = threadIdx.x+k/2+lk + (threadIdx.y+k/2)*(blockDim.x+k);
temp += fatia[h_e_i];
h_e_i = threadIdx.x+k/2-lk + (threadIdx.y+k/2)*(blockDim.x+k);
temp += fatia[h_e_i];
h_e_i = threadIdx.x+k/2 + (threadIdx.y+k/2+lk)*(blockDim.x+k);
temp += fatia[h_e_i];
h_e_i = threadIdx.x+k/2 + (threadIdx.y+k/2-lk)*(blockDim.x+k);
temp += fatia[h_e_i];
temp += Zdata[k+1-lk];
temp += Zdata[lk-1];
}
d_r[h_r_i] = temp;
if(z==Z-1)
break;
for(int i=0;i<k;i++)
Zdata[i]=Zdata[i+1];
int lk=k/2;
if(z+1+lk >= Z)
h_e_i = (x) + ( (y) * (X) ) + ( (z+1-lk) * (X*Y) );
else
h_e_i = (x) + ( (y) * (X) ) + ( (z+1+lk) * (X*Y) );
Zdata[k] = d_e[h_e_i];
}
delete[] Zdata;  // release the per-thread scratch buffer allocated with new[] above
}
/*
*arguments (as parsed in main below)
*1 - X dimension of the volume
*2 - Y dimension of the volume
*3 - Z dimension of the volume
*4 - stencil order k (neighborhood radius k/2 per axis)
*/
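/*
* Illustrative invocation (a sketch; the binary name "stencil3d_shared" is assumed and is not part of this source):
*   ./stencil3d_shared 64 64 32 4
* runs a 64 x 64 x 32 volume with stencil order k = 4, i.e. a radius of k/2 = 2 points per axis,
* and compares the GPU result against the CPU reference computed in main.
*/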
int main(int argc, char* argv[]) {
float *h_e,*h_r,*h_r_test;
float *d_e, *d_r;
int size,tam,times,sharedSize;
clock_t Ticks[2];
times = 1;
int X=8;
int Y=8;
int BX=8;
int BY=8;
int Z=4;
int k=2;
int GX=1;
int GY=1;
if(argc > 1)
{
X = atoi(argv[1]);
BX=X;
}
if(argc > 2)
{
Y = atoi(argv[2]);
BY = Y;
}
if(argc > 3)
Z = atoi(argv[3]);
if(argc > 4)
k = atoi(argv[4]);
if(X>32)
{
GX = ceil((float)X/(float)32);
BX = 32;
}
if(Y>32)
{
GY = ceil((float)Y/(float)32);
BY = 32;
}
dim3 block_dim(BX,BY,1);
dim3 grid_dim(GX,GY,1);
sharedSize = ((block_dim.x+k)*(block_dim.y+k))*sizeof(float);
//sharedSize = block_dim.x*block_dim.y*sizeof(float);
size = X * Y * Z * sizeof(float);
tam = X * Y * Z;
h_e = (float*) malloc(size);
h_r = (float*) malloc(size);
h_r_test = (float*) malloc(size);
hipMalloc(&d_e, size);
hipMalloc(&d_r, size);
for (int i = 0; i < tam; i++) {
h_e[i] = (float)(rand()%9000)/100.0;
h_r[i] = 0;
}
/* Copy vectors from host memory to device memory */
hipMemcpy(d_e, h_e, size, hipMemcpyHostToDevice);
hipMemcpy(d_r, h_r, size, hipMemcpyHostToDevice);
for(int t =0; t<times; t++)
{
for(int z=0; z<Z; z++)
{
for(int y=0; y<Y; y++)
{
for(int x=0; x<X; x++)
{
int h_r_i = x + ( y * (X) ) + ( z* (X*Y) );
int h_e_i = h_r_i;
//printf(" %f",h_e[h_e_i]);
h_r_test[h_r_i] = h_e[h_e_i];
for(int lk =1;lk<(k/2)+1;lk++)
{
if(x+lk >= X)
h_e_i = (x-lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x+lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
h_r_test[h_r_i] += h_e[h_e_i];
if(x-lk < 0)
h_e_i = (x+lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x-lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
h_r_test[h_r_i] += h_e[h_e_i];
if(y+lk >= Y)
h_e_i = (x) + ( (y-lk) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x) + ( (y+lk) * (X) ) + ( (z) * (X*Y) );
h_r_test[h_r_i] += h_e[h_e_i];
if(y-lk < 0)
h_e_i = (x) + ( (y+lk) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x) + ( (y-lk) * (X) ) + ( (z) * (X*Y) );
h_r_test[h_r_i] += h_e[h_e_i];
if(z+lk >= Z)
h_e_i = (x) + ( (y) * (X) ) + ( (z-lk) * (X*Y) );
else
h_e_i = (x) + ( (y) * (X) ) + ( (z+lk) * (X*Y) );
h_r_test[h_r_i] += h_e[h_e_i];
if(z-lk < 0)
h_e_i = (x) + ( (y) * (X) ) + ( (z+lk) * (X*Y) );
else
h_e_i = (x) + ( (y) * (X) ) + ( (z-lk) * (X*Y) );
h_r_test[h_r_i] += h_e[h_e_i];
}
}
//printf("\n");
}
//printf("-----\n\n");
}
for (int i = 0; i < tam; i++)
{
h_e[i] = h_r_test[i];
}
}
Ticks[0] = clock();  // host-side wall-clock reference for the Tempo report printed below
hipEvent_t start, stop;
hipEventCreate (&start);
hipEventCreate (&stop);
hipEventRecord (start, 0);
/******************
*** Kernel Call ***
*******************/
//_3Dstencil_global<<<blks,th_p_blk>>>(d_e,d_r,X,Y,Z);
hipLaunchKernelGGL(( _3Dstencil_sharedMemory), dim3(grid_dim),dim3(block_dim),sharedSize, 0, d_e,d_r,X,Y,Z,k);
hipError_t err = hipSuccess;
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch _3Dstencil_global kernel (error code %s)!\n", hipGetErrorString(err));
}
/******************
*** Kernel Call ***
*******************/
hipDeviceSynchronize();
hipEventRecord (stop, 0);
hipEventSynchronize (stop);
float elapsedTime;
hipEventElapsedTime (&elapsedTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
Ticks[1] = clock();
double Tempo = (Ticks[1] - Ticks[0]) * 1000.0 / CLOCKS_PER_SEC;
printf("X %d || Y %d \nBX %d || BY %d\nGX %d || GY %d\nZ %d \n",X,Y,BX,BY,GX,GY,Z);
printf ("[%d,%.5f,%.5f],\n", tam,elapsedTime,Tempo/1000.0);
hipMemcpy(h_r, d_r, size, hipMemcpyDeviceToHost);
bool certo=true;
//printf("threads/blk %d -- blocks %d\n",th_p_blk,blks);
for (int i = 0; i < tam; i++){
//printf("%d - %d\n",h_z_res[i],h_z[i]);
if(h_r_test[i] != h_r[i])
{
printf("i%d\n",i);
certo=false;
}
}
if(!certo)
printf("\n*****\n certo = %s\n*****\n", certo ? "true" : "false");
hipFree(d_e);
hipFree(d_r);
std::free(h_e);
std::free(h_r);
std::free(h_r_test);
return 0;
} /* main */
| ff8fa4bd54683797021daf062307003469193e50.cu | #include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
using namespace std;
__global__ void _3Dstencil_sharedMemory(float *d_e,float *d_r,int X,int Y,int Z,int k){
//int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
//printf("sou id %d || threadIdx.x %d || blockIdx.x %d || blockDim.x %d \n",thread_id,threadIdx.x ,blockIdx.x,blockDim.x);
//int thread_id = threadIdx.x + threadIdx.y*blockDim.x + (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
//printf("sou id %d || threadIdx.x %d || blockIdx.x %d|| blockIdx.y %d || blockDim.x %d|| blockDim.y %d \n",thread_id,threadIdx.x ,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
int x,y;
x = threadIdx.x + (blockIdx.x*blockDim.x);
y = threadIdx.y + (blockIdx.y*blockDim.y);
extern __shared__ float fatia[];
//printf("X = %d || Y = %d\n",x,y);
float *Zdata = new float[k+1];
int z=0;
Zdata[k/2] = d_e[x + ( y * (X) ) + ( z* (X*Y) )];
for(int lk =1;lk<(k/2)+1;lk++)
{
int h_e_i;
if(z+lk >= Z)
h_e_i = (x) + ( (y) * (X) ) + ( (z-lk) * (X*Y) );
else
h_e_i = (x) + ( (y) * (X) ) + ( (z+lk) * (X*Y) );
Zdata[k+1-lk] = d_e[h_e_i];
if(z-lk < 0)
h_e_i = (x) + ( (y) * (X) ) + ( (z+lk) * (X*Y) );
else
h_e_i = (x) + ( (y) * (X) ) + ( (z-lk) * (X*Y) );
Zdata[lk-1] = d_e[h_e_i];
}
for(int z=0; z<Z; z++)
{
int h_r_i = x + ( y * (X) ) + ( z* (X*Y) );
fatia[(k/2+threadIdx.x) + (k/2+threadIdx.y)*(blockDim.x+k)] = d_e[h_r_i];
//fatia[threadIdx.x + threadIdx.y*blockDim.x] = d_e[h_r_i];
if(threadIdx.y==0)
for(int lk =1;lk<(k/2)+1;lk++)
{
int h_e_i;
if(y-lk < 0)
h_e_i = (x) + ( (y+lk) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x) + ( (y-lk) * (X) ) + ( (z) * (X*Y) );
fatia[(k/2+threadIdx.x) + ((threadIdx.y+(k/2+1)-lk-1)*(blockDim.x+k))] = d_e[h_e_i];
}
if(threadIdx.y==blockDim.y-1)
for(int lk =1;lk<(k/2)+1;lk++)
{
int h_e_i;
if(y+lk >= Y)
h_e_i = (x) + ( (y-lk) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x) + ( (y+lk) * (X) ) + ( (z) * (X*Y) );
fatia[(k/2+threadIdx.x) + ((threadIdx.y+lk+k/2)*(blockDim.x+k))] = d_e[h_e_i];
}
if(threadIdx.x==0)
for(int lk =1;lk<(k/2)+1;lk++)
{
int h_e_i;
if(x-lk < 0)
h_e_i = (x+lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x-lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
fatia[((k/2+1)-lk+threadIdx.x-1) + ((threadIdx.y+k/2)*(blockDim.x+k))] = d_e[h_e_i];
}
if(threadIdx.x==blockDim.x-1)
for(int lk =1;lk<(k/2)+1;lk++)
{
int h_e_i;
if(x+lk >= X)
h_e_i = (x-lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x+lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
fatia[(lk+threadIdx.x+k/2) + ((threadIdx.y+k/2)*(blockDim.x+k))] = d_e[h_e_i];
}
__syncthreads();
// if(blockIdx.x==0 && threadIdx.x==2 && threadIdx.y==2 && z==3)
// {
// printf("\nFATIA\n");
// for(int j=0;j<blockDim.y+k;j++)
// {
// for(int i=0;i<blockDim.x+k;i++)
// {
// printf(" %f",fatia[i+j*(blockDim.x+k)]);
// }
// printf("\n");
// }
// }
int h_e_i = h_r_i;
float temp = Zdata[k/2];
for(int lk =1;lk<(k/2)+1;lk++)
{
h_e_i = threadIdx.x+k/2+lk + (threadIdx.y+k/2)*(blockDim.x+k);
temp += fatia[h_e_i];
h_e_i = threadIdx.x+k/2-lk + (threadIdx.y+k/2)*(blockDim.x+k);
temp += fatia[h_e_i];
h_e_i = threadIdx.x+k/2 + (threadIdx.y+k/2+lk)*(blockDim.x+k);
temp += fatia[h_e_i];
h_e_i = threadIdx.x+k/2 + (threadIdx.y+k/2-lk)*(blockDim.x+k);
temp += fatia[h_e_i];
temp += Zdata[k+1-lk];
temp += Zdata[lk-1];
}
d_r[h_r_i] = temp;
if(z==Z-1)
break;
for(int i=0;i<k;i++)
Zdata[i]=Zdata[i+1];
int lk=k/2;
if(z+1+lk >= Z)
h_e_i = (x) + ( (y) * (X) ) + ( (z+1-lk) * (X*Y) );
else
h_e_i = (x) + ( (y) * (X) ) + ( (z+1+lk) * (X*Y) );
Zdata[k] = d_e[h_e_i];
}
delete[] Zdata;  // release the per-thread scratch buffer allocated with new[] above
}
/*
*arguments (as parsed in main below)
*1 - X dimension of the volume
*2 - Y dimension of the volume
*3 - Z dimension of the volume
*4 - stencil order k (neighborhood radius k/2 per axis)
*/
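/*
* Illustrative invocation (a sketch; the binary name "stencil3d_shared" is assumed and is not part of this source):
*   ./stencil3d_shared 64 64 32 4
* runs a 64 x 64 x 32 volume with stencil order k = 4, i.e. a radius of k/2 = 2 points per axis,
* and compares the GPU result against the CPU reference computed in main.
*/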
int main(int argc, char* argv[]) {
float *h_e,*h_r,*h_r_test;
float *d_e, *d_r;
int size,tam,times,sharedSize;
clock_t Ticks[2];
times = 1;
int X=8;
int Y=8;
int BX=8;
int BY=8;
int Z=4;
int k=2;
int GX=1;
int GY=1;
if(argc > 1)
{
X = atoi(argv[1]);
BX=X;
}
if(argc > 2)
{
Y = atoi(argv[2]);
BY = Y;
}
if(argc > 3)
Z = atoi(argv[3]);
if(argc > 4)
k = atoi(argv[4]);
if(X>32)
{
GX = ceil((float)X/(float)32);
BX = 32;
}
if(Y>32)
{
GY = ceil((float)Y/(float)32);
BY = 32;
}
dim3 block_dim(BX,BY,1);
dim3 grid_dim(GX,GY,1);
sharedSize = ((block_dim.x+k)*(block_dim.y+k))*sizeof(float);
//sharedSize = block_dim.x*block_dim.y*sizeof(float);
size = X * Y * Z * sizeof(float);
tam = X * Y * Z;
h_e = (float*) malloc(size);
h_r = (float*) malloc(size);
h_r_test = (float*) malloc(size);
cudaMalloc(&d_e, size);
cudaMalloc(&d_r, size);
for (int i = 0; i < tam; i++) {
h_e[i] = (float)(rand()%9000)/100.0;
h_r[i] = 0;
}
/* Copy vectors from host memory to device memory */
cudaMemcpy(d_e, h_e, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_r, h_r, size, cudaMemcpyHostToDevice);
for(int t =0; t<times; t++)
{
for(int z=0; z<Z; z++)
{
for(int y=0; y<Y; y++)
{
for(int x=0; x<X; x++)
{
int h_r_i = x + ( y * (X) ) + ( z* (X*Y) );
int h_e_i = h_r_i;
//printf(" %f",h_e[h_e_i]);
h_r_test[h_r_i] = h_e[h_e_i];
for(int lk =1;lk<(k/2)+1;lk++)
{
if(x+lk >= X)
h_e_i = (x-lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x+lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
h_r_test[h_r_i] += h_e[h_e_i];
if(x-lk < 0)
h_e_i = (x+lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x-lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
h_r_test[h_r_i] += h_e[h_e_i];
if(y+lk >= Y)
h_e_i = (x) + ( (y-lk) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x) + ( (y+lk) * (X) ) + ( (z) * (X*Y) );
h_r_test[h_r_i] += h_e[h_e_i];
if(y-lk < 0)
h_e_i = (x) + ( (y+lk) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x) + ( (y-lk) * (X) ) + ( (z) * (X*Y) );
h_r_test[h_r_i] += h_e[h_e_i];
if(z+lk >= Z)
h_e_i = (x) + ( (y) * (X) ) + ( (z-lk) * (X*Y) );
else
h_e_i = (x) + ( (y) * (X) ) + ( (z+lk) * (X*Y) );
h_r_test[h_r_i] += h_e[h_e_i];
if(z-lk < 0)
h_e_i = (x) + ( (y) * (X) ) + ( (z+lk) * (X*Y) );
else
h_e_i = (x) + ( (y) * (X) ) + ( (z-lk) * (X*Y) );
h_r_test[h_r_i] += h_e[h_e_i];
}
}
//printf("\n");
}
//printf("-----\n\n");
}
for (int i = 0; i < tam; i++)
{
h_e[i] = h_r_test[i];
}
}
Ticks[0] = clock();  // host-side wall-clock reference for the Tempo report printed below
cudaEvent_t start, stop;
cudaEventCreate (&start);
cudaEventCreate (&stop);
cudaEventRecord (start, 0);
/******************
*** Kernel Call ***
*******************/
//_3Dstencil_global<<<blks,th_p_blk>>>(d_e,d_r,X,Y,Z);
_3Dstencil_sharedMemory<<<grid_dim,block_dim,sharedSize>>>(d_e,d_r,X,Y,Z,k);
cudaError_t err = cudaSuccess;
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch _3Dstencil_global kernel (error code %s)!\n", cudaGetErrorString(err));
}
/******************
*** Kernel Call ***
*******************/
cudaDeviceSynchronize();
cudaEventRecord (stop, 0);
cudaEventSynchronize (stop);
float elapsedTime;
cudaEventElapsedTime (&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
Ticks[1] = clock();
double Tempo = (Ticks[1] - Ticks[0]) * 1000.0 / CLOCKS_PER_SEC;
printf("X %d || Y %d \nBX %d || BY %d\nGX %d || GY %d\nZ %d \n",X,Y,BX,BY,GX,GY,Z);
printf ("[%d,%.5f,%.5f],\n", tam,elapsedTime,Tempo/1000.0);
cudaMemcpy(h_r, d_r, size, cudaMemcpyDeviceToHost);
bool certo=true;
//printf("threads/blk %d -- blocks %d\n",th_p_blk,blks);
for (int i = 0; i < tam; i++){
//printf("%d - %d\n",h_z_res[i],h_z[i]);
if(h_r_test[i] != h_r[i])
{
printf("i%d\n",i);
certo=false;
}
}
if(!certo)
printf("\n*****\n certo = %s\n*****\n", certo ? "true" : "false");
cudaFree(d_e);
cudaFree(d_r);
std::free(h_e);
std::free(h_r);
std::free(h_r_test);
return 0;
} /* main */
|
9a67a453cf1544195e34167d11d80fd7930ba61b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2013, NVIDIA Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <cstdlib>
#include <thrust/device_vector.h>
#include <thrust/equal.h>
#include <trove/ptr.h>
#include <trove/aos.h>
#include "timer.h"
using namespace trove;
template<typename T>
__global__ void
benchmark_contiguous_shfl_store(T* r) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
T data;
int size = detail::aliased_size<T, int>::value;
data = counting_array<T>::impl(
global_index * size);
store_warp_contiguous(data, r + global_index);
}
template<typename T>
__global__ void
benchmark_contiguous_direct_store(T* r) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
T data;
int size = detail::aliased_size<T, int>::value;
data = counting_array<T>::impl(
global_index * size);
r[global_index] = data;
}
template<typename T>
__global__ void
benchmark_contiguous_shfl_load(T* s, typename T::value_type* r) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
T data = load_warp_contiguous(s + global_index);
r[global_index] = sum(data);
}
template<typename T>
__global__ void
benchmark_contiguous_direct_load(T* s, typename T::value_type* r) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
T data = s[global_index];
r[global_index] = sum(data);
}
template<typename T>
__global__ void
benchmark_shfl_gather(const int* indices, T* raw_s, T* raw_r) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
int index = indices[global_index];
trove::coalesced_ptr<T> s(raw_s);
trove::coalesced_ptr<T> r(raw_r);
T data = s[index];
r[global_index] = data;
}
template<typename T>
__global__ void
benchmark_shfl_scatter(const int* indices, T* raw_s, T* raw_r) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
int index = indices[global_index];
trove::coalesced_ptr<T> s(raw_s);
trove::coalesced_ptr<T> r(raw_r);
T data = s[global_index];
r[index] = data;
}
template<typename T>
__global__ void
benchmark_direct_gather(const int* indices, T* s, T* r) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
int index = indices[global_index];
T data = s[index];
r[global_index] = data;
}
template<typename T>
__global__ void
benchmark_direct_scatter(const int* indices, T* s, T* r) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
int index = indices[global_index];
T data = s[global_index];
r[index] = data;
}
template<int i>
void run_benchmark_contiguous_store(const std::string name, void (*test)(array<int, i>*),
void (*gold)(array<int, i>*)) {
typedef array<int, i> T;
std::cout << name << ", " << i << ", ";
int n_blocks = 15 * 8 * 100;
int block_size = 256;
int n = n_blocks * block_size - 100;
thrust::device_vector<T> r(n);
int iterations = 10;
cuda_timer timer;
timer.start();
for(int j = 0; j < iterations; j++) {
hipLaunchKernelGGL(( test), dim3(n_blocks), dim3(block_size), 0, 0, thrust::raw_pointer_cast(r.data()));
}
float time = timer.stop();
float gbs = (float)(sizeof(T) * (iterations * n_blocks * block_size)) / (time * 1000000);
std::cout << gbs << ", ";
bool correct = true;
if (test != gold) {
thrust::device_vector<T> g(n);
hipLaunchKernelGGL(( gold), dim3(n_blocks), dim3(block_size), 0, 0, thrust::raw_pointer_cast(g.data()));
correct = thrust::equal(r.begin(), r.end(), g.begin());
}
if (correct)
std::cout << "Results passed";
else
std::cout << "INCORRECT";
std::cout << std::endl;
}
template<int i>
struct run_benchmark_contiguous_shfl_store {
typedef array<int, i> T;
static void impl() {
run_benchmark_contiguous_store("Contiguous SHFL Store", &benchmark_contiguous_shfl_store<T>,
&benchmark_contiguous_direct_store<T>);
}
};
template<int i>
struct run_benchmark_contiguous_direct_store {
typedef array<int, i> T;
static void impl() {
run_benchmark_contiguous_store("Contiguous Direct Store", &benchmark_contiguous_direct_store<T>,
&benchmark_contiguous_direct_store<T>);
}
};
template<typename T>
void fill_test(thrust::device_vector<T>& d) {
thrust::device_ptr<int> p = thrust::device_ptr<int>((int*)thrust::raw_pointer_cast(d.data()));
thrust::counting_iterator<int> c(0);
int s = d.size() * sizeof(T) / sizeof(int);
thrust::copy(c, c+s, p);
}
template<int i>
void run_benchmark_contiguous_load(const std::string name, void (*test)(array<int, i>*, int*),
void (*gold)(array<int, i>*, int*)) {
typedef array<int, i> T;
std::cout << name << ", " << i << ", ";
int n_blocks = 15 * 8 * 100;
int block_size = 256;
int n = n_blocks * block_size;
thrust::device_vector<T> s(n);
fill_test(s);
thrust::device_vector<int> r(n);
int iterations = 10;
cuda_timer timer;
hipEvent_t start0, stop0;
float time0;
hipEventCreate(&start0);
hipEventCreate(&stop0);
timer.start();
hipEventRecord(start0,0);
for(int j = 0; j < iterations; j++) {
hipLaunchKernelGGL(( test), dim3(n_blocks), dim3(block_size), 0, 0, thrust::raw_pointer_cast(s.data()), thrust::raw_pointer_cast(r.data()));
}
hipEventRecord(stop0,0);
hipDeviceSynchronize();
float time = timer.stop();
float gbs = (float)((sizeof(T) + sizeof(int)) * (iterations * n_blocks * block_size)) / (time * 1000000);
hipEventElapsedTime(&time0, start0, stop0);
hipEventDestroy(start0);
hipEventDestroy(stop0);
std::cout << gbs << ", (" << time0 << ")";
bool correct = true;
if (test != gold) {
thrust::device_vector<int> g(n);
hipLaunchKernelGGL(( gold), dim3(n_blocks), dim3(block_size), 0, 0, thrust::raw_pointer_cast(s.data()), thrust::raw_pointer_cast(g.data()));
correct = thrust::equal(r.begin(), r.end(), g.begin());
}
if (correct)
std::cout << "Results passed";
else
std::cout << "INCORRECT";
std::cout << std::endl;
}
template<int i>
struct run_benchmark_contiguous_shfl_load {
typedef array<int, i> T;
static void impl() {
run_benchmark_contiguous_load("Contiguous SHFL Load", &benchmark_contiguous_shfl_load<T>, &benchmark_contiguous_direct_load<T>);
}
};
template<int i>
struct run_benchmark_contiguous_direct_load {
typedef array<int, i> T;
static void impl() {
run_benchmark_contiguous_load("Contiguous Direct Load", &benchmark_contiguous_direct_load<T>, &benchmark_contiguous_direct_load<T>);
}
};
thrust::device_vector<int> make_device_random(int s) {
thrust::host_vector<int> h(s);
thrust::generate(h.begin(), h.end(), rand);
thrust::device_vector<int> d = h;
return d;
}
thrust::device_vector<int> make_random_permutation(int s) {
thrust::device_vector<int> keys = make_device_random(s);
thrust::counting_iterator<int> c(0);
thrust::device_vector<int> values(s);
thrust::copy(c, c+s, values.begin());
thrust::sort_by_key(keys.begin(), keys.end(), values.begin());
return values;
}
template<int i>
void run_benchmark_random(const std::string name, const thrust::device_vector<int>& permutation,
void (*test)(const int*, array<int, i>*, array<int, i>*),
void (*gold)(const int*, array<int, i>*, array<int, i>*)) {
typedef array<int, i> T;
std::cout << name << ", " << i << ", ";
int n_blocks = 15 * 8 * 100;
int block_size = 256;
int n = n_blocks * block_size;
thrust::device_vector<T> s(n);
fill_test(s);
thrust::device_vector<T> r(n);
int iterations = 10;
cuda_timer timer;
timer.start();
for(int j = 0; j < iterations; j++) {
hipLaunchKernelGGL(( test), dim3(n_blocks), dim3(block_size), 0, 0,
thrust::raw_pointer_cast(permutation.data()),
thrust::raw_pointer_cast(s.data()),
thrust::raw_pointer_cast(r.data()));
}
float time = timer.stop();
float gbs = (float)(sizeof(T) * (2 * iterations * n_blocks * block_size) + sizeof(int) * iterations * n_blocks * block_size) / (time * 1000000);
std::cout << gbs << ", ";
bool correct = true;
if (test != gold) {
thrust::device_vector<T> g(n);
hipLaunchKernelGGL(( gold), dim3(n_blocks), dim3(block_size), 0, 0, thrust::raw_pointer_cast(permutation.data()),
thrust::raw_pointer_cast(s.data()), thrust::raw_pointer_cast(g.data()));
correct = thrust::equal(r.begin(), r.end(), g.begin());
}
if (correct)
std::cout << "Results passed";
else
std::cout << "INCORRECT";
std::cout << std::endl;
}
template<int i>
struct run_benchmark_shfl_gather {
typedef array<int, i> T;
static void impl(const thrust::device_vector<int>& permutation) {
run_benchmark_random("SHFL Gather", permutation, &benchmark_shfl_gather<T>, &benchmark_direct_gather<T>);
}
};
template<int i>
struct run_benchmark_direct_gather {
typedef array<int, i> T;
static void impl(const thrust::device_vector<int>& permutation) {
run_benchmark_random("Direct Gather", permutation, &benchmark_direct_gather<T>, &benchmark_direct_gather<T>);
}
};
template<int i>
struct run_benchmark_shfl_scatter {
typedef array<int, i> T;
static void impl(const thrust::device_vector<int>& permutation) {
run_benchmark_random("SHFL Scatter", permutation, &benchmark_shfl_scatter<T>, &benchmark_direct_scatter<T>);
}
};
template<int i>
struct run_benchmark_direct_scatter {
typedef array<int, i> T;
static void impl(const thrust::device_vector<int>& permutation) {
run_benchmark_random("Direct Scatter", permutation, &benchmark_direct_scatter<T>, &benchmark_direct_scatter<T>);
}
};
template<template<int> class F, typename Cons>
struct do_tests {
static void impl() {
F<Cons::head>::impl();
do_tests<F, typename Cons::tail>::impl();
}
template<typename T>
static void impl(const T& t) {
F<Cons::head>::impl(t);
do_tests<F, typename Cons::tail>::impl(t);
}
};
template<template<int> class F>
struct do_tests<F, null_type> {
static void impl() {}
template<typename T>
static void impl(const T& t) {}
};
#ifndef LOWER_BOUND
#define LOWER_BOUND 1
#endif
#ifndef UPPER_BOUND
#define UPPER_BOUND 16
#endif
typedef static_range<LOWER_BOUND, UPPER_BOUND> sizes;
int main() {
do_tests<run_benchmark_contiguous_shfl_store, sizes>::impl();
do_tests<run_benchmark_contiguous_direct_store, sizes>::impl();
do_tests<run_benchmark_contiguous_shfl_load, sizes>::impl();
do_tests<run_benchmark_contiguous_direct_load, sizes>::impl();
int size = 15 * 8 * 100 * 256;
thrust::device_vector<int> permutation = make_random_permutation(size);
do_tests<run_benchmark_shfl_scatter, sizes>::impl(permutation);
do_tests<run_benchmark_direct_scatter, sizes>::impl(permutation);
do_tests<run_benchmark_shfl_gather, sizes>::impl(permutation);
do_tests<run_benchmark_direct_gather, sizes>::impl(permutation);
}
| 9a67a453cf1544195e34167d11d80fd7930ba61b.cu | /*
Copyright (c) 2013, NVIDIA Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <cstdlib>
#include <thrust/device_vector.h>
#include <thrust/equal.h>
#include <trove/ptr.h>
#include <trove/aos.h>
#include "timer.h"
using namespace trove;
template<typename T>
__global__ void
benchmark_contiguous_shfl_store(T* r) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
T data;
int size = detail::aliased_size<T, int>::value;
data = counting_array<T>::impl(
global_index * size);
store_warp_contiguous(data, r + global_index);
}
template<typename T>
__global__ void
benchmark_contiguous_direct_store(T* r) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
T data;
int size = detail::aliased_size<T, int>::value;
data = counting_array<T>::impl(
global_index * size);
r[global_index] = data;
}
template<typename T>
__global__ void
benchmark_contiguous_shfl_load(T* s, typename T::value_type* r) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
T data = load_warp_contiguous(s + global_index);
r[global_index] = sum(data);
}
template<typename T>
__global__ void
benchmark_contiguous_direct_load(T* s, typename T::value_type* r) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
T data = s[global_index];
r[global_index] = sum(data);
}
template<typename T>
__global__ void
benchmark_shfl_gather(const int* indices, T* raw_s, T* raw_r) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
int index = indices[global_index];
trove::coalesced_ptr<T> s(raw_s);
trove::coalesced_ptr<T> r(raw_r);
T data = s[index];
r[global_index] = data;
}
template<typename T>
__global__ void
benchmark_shfl_scatter(const int* indices, T* raw_s, T* raw_r) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
int index = indices[global_index];
trove::coalesced_ptr<T> s(raw_s);
trove::coalesced_ptr<T> r(raw_r);
T data = s[global_index];
r[index] = data;
}
template<typename T>
__global__ void
benchmark_direct_gather(const int* indices, T* s, T* r) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
int index = indices[global_index];
T data = s[index];
r[global_index] = data;
}
template<typename T>
__global__ void
benchmark_direct_scatter(const int* indices, T* s, T* r) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
int index = indices[global_index];
T data = s[global_index];
r[index] = data;
}
template<int i>
void run_benchmark_contiguous_store(const std::string name, void (*test)(array<int, i>*),
void (*gold)(array<int, i>*)) {
typedef array<int, i> T;
std::cout << name << ", " << i << ", ";
int n_blocks = 15 * 8 * 100;
int block_size = 256;
int n = n_blocks * block_size - 100;
thrust::device_vector<T> r(n);
int iterations = 10;
cuda_timer timer;
timer.start();
for(int j = 0; j < iterations; j++) {
test<<<n_blocks, block_size>>>(thrust::raw_pointer_cast(r.data()));
}
float time = timer.stop();
float gbs = (float)(sizeof(T) * (iterations * n_blocks * block_size)) / (time * 1000000);
std::cout << gbs << ", ";
bool correct = true;
if (test != gold) {
thrust::device_vector<T> g(n);
gold<<<n_blocks, block_size>>>(thrust::raw_pointer_cast(g.data()));
correct = thrust::equal(r.begin(), r.end(), g.begin());
}
if (correct)
std::cout << "Results passed";
else
std::cout << "INCORRECT";
std::cout << std::endl;
}
template<int i>
struct run_benchmark_contiguous_shfl_store {
typedef array<int, i> T;
static void impl() {
run_benchmark_contiguous_store("Contiguous SHFL Store", &benchmark_contiguous_shfl_store<T>,
&benchmark_contiguous_direct_store<T>);
}
};
template<int i>
struct run_benchmark_contiguous_direct_store {
typedef array<int, i> T;
static void impl() {
run_benchmark_contiguous_store("Contiguous Direct Store", &benchmark_contiguous_direct_store<T>,
&benchmark_contiguous_direct_store<T>);
}
};
template<typename T>
void fill_test(thrust::device_vector<T>& d) {
thrust::device_ptr<int> p = thrust::device_ptr<int>((int*)thrust::raw_pointer_cast(d.data()));
thrust::counting_iterator<int> c(0);
int s = d.size() * sizeof(T) / sizeof(int);
thrust::copy(c, c+s, p);
}
template<int i>
void run_benchmark_contiguous_load(const std::string name, void (*test)(array<int, i>*, int*),
void (*gold)(array<int, i>*, int*)) {
typedef array<int, i> T;
std::cout << name << ", " << i << ", ";
int n_blocks = 15 * 8 * 100;
int block_size = 256;
int n = n_blocks * block_size;
thrust::device_vector<T> s(n);
fill_test(s);
thrust::device_vector<int> r(n);
int iterations = 10;
cuda_timer timer;
cudaEvent_t start0, stop0;
float time0;
cudaEventCreate(&start0);
cudaEventCreate(&stop0);
timer.start();
cudaEventRecord(start0,0);
for(int j = 0; j < iterations; j++) {
test<<<n_blocks, block_size>>>(thrust::raw_pointer_cast(s.data()), thrust::raw_pointer_cast(r.data()));
}
cudaEventRecord(stop0,0);
cudaDeviceSynchronize();
float time = timer.stop();
float gbs = (float)((sizeof(T) + sizeof(int)) * (iterations * n_blocks * block_size)) / (time * 1000000);
cudaEventElapsedTime(&time0, start0, stop0);
cudaEventDestroy(start0);
cudaEventDestroy(stop0);
std::cout << gbs << ", (" << time0 << ")";
bool correct = true;
if (test != gold) {
thrust::device_vector<int> g(n);
gold<<<n_blocks, block_size>>>(thrust::raw_pointer_cast(s.data()), thrust::raw_pointer_cast(g.data()));
correct = thrust::equal(r.begin(), r.end(), g.begin());
}
if (correct)
std::cout << "Results passed";
else
std::cout << "INCORRECT";
std::cout << std::endl;
}
template<int i>
struct run_benchmark_contiguous_shfl_load {
typedef array<int, i> T;
static void impl() {
run_benchmark_contiguous_load("Contiguous SHFL Load", &benchmark_contiguous_shfl_load<T>, &benchmark_contiguous_direct_load<T>);
}
};
template<int i>
struct run_benchmark_contiguous_direct_load {
typedef array<int, i> T;
static void impl() {
run_benchmark_contiguous_load("Contiguous Direct Load", &benchmark_contiguous_direct_load<T>, &benchmark_contiguous_direct_load<T>);
}
};
thrust::device_vector<int> make_device_random(int s) {
thrust::host_vector<int> h(s);
thrust::generate(h.begin(), h.end(), rand);
thrust::device_vector<int> d = h;
return d;
}
thrust::device_vector<int> make_random_permutation(int s) {
thrust::device_vector<int> keys = make_device_random(s);
thrust::counting_iterator<int> c(0);
thrust::device_vector<int> values(s);
thrust::copy(c, c+s, values.begin());
thrust::sort_by_key(keys.begin(), keys.end(), values.begin());
return values;
}
template<int i>
void run_benchmark_random(const std::string name, const thrust::device_vector<int>& permutation,
void (*test)(const int*, array<int, i>*, array<int, i>*),
void (*gold)(const int*, array<int, i>*, array<int, i>*)) {
typedef array<int, i> T;
std::cout << name << ", " << i << ", ";
int n_blocks = 15 * 8 * 100;
int block_size = 256;
int n = n_blocks * block_size;
thrust::device_vector<T> s(n);
fill_test(s);
thrust::device_vector<T> r(n);
int iterations = 10;
cuda_timer timer;
timer.start();
for(int j = 0; j < iterations; j++) {
test<<<n_blocks, block_size>>>(
thrust::raw_pointer_cast(permutation.data()),
thrust::raw_pointer_cast(s.data()),
thrust::raw_pointer_cast(r.data()));
}
float time = timer.stop();
float gbs = (float)(sizeof(T) * (2 * iterations * n_blocks * block_size) + sizeof(int) * iterations * n_blocks * block_size) / (time * 1000000);
std::cout << gbs << ", ";
bool correct = true;
if (test != gold) {
thrust::device_vector<T> g(n);
gold<<<n_blocks, block_size>>>(thrust::raw_pointer_cast(permutation.data()),
thrust::raw_pointer_cast(s.data()), thrust::raw_pointer_cast(g.data()));
correct = thrust::equal(r.begin(), r.end(), g.begin());
}
if (correct)
std::cout << "Results passed";
else
std::cout << "INCORRECT";
std::cout << std::endl;
}
template<int i>
struct run_benchmark_shfl_gather {
typedef array<int, i> T;
static void impl(const thrust::device_vector<int>& permutation) {
run_benchmark_random("SHFL Gather", permutation, &benchmark_shfl_gather<T>, &benchmark_direct_gather<T>);
}
};
template<int i>
struct run_benchmark_direct_gather {
typedef array<int, i> T;
static void impl(const thrust::device_vector<int>& permutation) {
run_benchmark_random("Direct Gather", permutation, &benchmark_direct_gather<T>, &benchmark_direct_gather<T>);
}
};
template<int i>
struct run_benchmark_shfl_scatter {
typedef array<int, i> T;
static void impl(const thrust::device_vector<int>& permutation) {
run_benchmark_random("SHFL Scatter", permutation, &benchmark_shfl_scatter<T>, &benchmark_direct_scatter<T>);
}
};
template<int i>
struct run_benchmark_direct_scatter {
typedef array<int, i> T;
static void impl(const thrust::device_vector<int>& permutation) {
run_benchmark_random("Direct Scatter", permutation, &benchmark_direct_scatter<T>, &benchmark_direct_scatter<T>);
}
};
template<template<int> class F, typename Cons>
struct do_tests {
static void impl() {
F<Cons::head>::impl();
do_tests<F, typename Cons::tail>::impl();
}
template<typename T>
static void impl(const T& t) {
F<Cons::head>::impl(t);
do_tests<F, typename Cons::tail>::impl(t);
}
};
template<template<int> class F>
struct do_tests<F, null_type> {
static void impl() {}
template<typename T>
static void impl(const T& t) {}
};
#ifndef LOWER_BOUND
#define LOWER_BOUND 1
#endif
#ifndef UPPER_BOUND
#define UPPER_BOUND 16
#endif
typedef static_range<LOWER_BOUND, UPPER_BOUND> sizes;
int main() {
do_tests<run_benchmark_contiguous_shfl_store, sizes>::impl();
do_tests<run_benchmark_contiguous_direct_store, sizes>::impl();
do_tests<run_benchmark_contiguous_shfl_load, sizes>::impl();
do_tests<run_benchmark_contiguous_direct_load, sizes>::impl();
int size = 15 * 8 * 100 * 256;
thrust::device_vector<int> permutation = make_random_permutation(size);
do_tests<run_benchmark_shfl_scatter, sizes>::impl(permutation);
do_tests<run_benchmark_direct_scatter, sizes>::impl(permutation);
do_tests<run_benchmark_shfl_gather, sizes>::impl(permutation);
do_tests<run_benchmark_direct_gather, sizes>::impl(permutation);
}
|
7e2753cfa0d87cc53e3bf548e175f41f717105f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/strip_decimals_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void StripDecimalsForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = (Dtype)((int)in[index]);
}
}
template <typename Dtype>
void StripDecimalsLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StripDecimalsForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void StripDecimalsLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
}
INSTANTIATE_LAYER_GPU_FUNCS(StripDecimalsLayer);
} // namespace caffe
| 7e2753cfa0d87cc53e3bf548e175f41f717105f4.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/strip_decimals_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void StripDecimalsForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = (Dtype)((int)in[index]);
}
}
template <typename Dtype>
void StripDecimalsLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
StripDecimalsForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void StripDecimalsLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
}
INSTANTIATE_LAYER_GPU_FUNCS(StripDecimalsLayer);
} // namespace caffe
|
3f13240cfd9111aa63e7c4578c272e0537c9fe65.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO CSV reader class implementation
**/
#include "reader_impl.hpp"
#include <algorithm>
#include <iostream>
#include <numeric>
#include <tuple>
#include <unordered_map>
#include "legacy/datetime_parser.cuh"
#include "legacy/type_conversion.cuh"
#include <utilities/legacy/cudf_utils.h>
#include <cudf/legacy/unary.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <nvstrings/NVStrings.h>
#include <io/comp/io_uncomp.h>
#include <io/utilities/parsing_utils.cuh>
#include <io/utilities/type_conversion.cuh>
using std::string;
using std::vector;
namespace cudf {
namespace experimental {
namespace io {
namespace detail {
namespace csv {
using namespace cudf::io::csv;
using namespace cudf::io;
/**---------------------------------------------------------------------------*
 * @brief Estimates the maximum expected length of a row, based on the number
* of columns
*
* If the number of columns is not available, it will return a value large
* enough for most use cases
*
* @param[in] num_columns Number of columns in the CSV file (optional)
*
* @return Estimated maximum size of a row, in bytes
*---------------------------------------------------------------------------**/
constexpr size_t calculateMaxRowSize(int num_columns = 0) noexcept {
constexpr size_t max_row_bytes = 16 * 1024; // 16KB
constexpr size_t column_bytes = 64;
constexpr size_t base_padding = 1024; // 1KB
if (num_columns == 0) {
// Use flat size if the number of columns is not known
return max_row_bytes;
} else {
// Expand the size based on the number of columns, if available
return base_padding + num_columns * column_bytes;
}
}
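// Worked example of the estimate above (not part of the original source): with
// num_columns == 10 the bound is 1024 + 10 * 64 = 1664 bytes per row, while an
// unknown column count (num_columns == 0) falls back to the flat 16 KB upper bound.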
/**
* @brief Translates a dtype string and returns its dtype enumeration and any
* extended dtype flags that are supported by cuIO. Often, this is a column
 * with the same underlying dtype as the basic types, but with different parsing
 * interpretations.
 *
 * @param[in] dtype String containing the basic or extended dtype
 *
 * @return std::tuple<data_type, column_parse::flags> Tuple of dtype and parse flags
*/
std::tuple<data_type, column_parse::flags> get_dtype_info(
const std::string &dtype) {
if (dtype == "hex" || dtype == "hex64") {
return std::make_tuple(data_type{cudf::type_id::INT64},
column_parse::as_hexadecimal);
}
if (dtype == "hex32") {
return std::make_tuple(data_type{cudf::type_id::INT32},
column_parse::as_hexadecimal);
}
return std::make_tuple(convert_string_to_dtype(dtype), column_parse::as_default);
}
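// Illustrative use of get_dtype_info (a sketch; "float64" is assumed to be one of the
// basic names understood by convert_string_to_dtype):
//   data_type t; column_parse::flags f;
//   std::tie(t, f) = get_dtype_info("hex32");   // -> {INT32,   as_hexadecimal}
//   std::tie(t, f) = get_dtype_info("hex");     // -> {INT64,   as_hexadecimal}
//   std::tie(t, f) = get_dtype_info("float64"); // -> {FLOAT64, as_default}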
/**
* @brief Removes the first and Last quote in the string
*/
string removeQuotes(string str, char quotechar) {
// Exclude first and last quotation char
const size_t first_quote = str.find(quotechar);
if (first_quote != string::npos) {
str.erase(first_quote, 1);
}
const size_t last_quote = str.rfind(quotechar);
if (last_quote != string::npos) {
str.erase(last_quote, 1);
}
return str;
}
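// Illustrative behaviour (a sketch, not part of the original source): with quotechar '"',
// removeQuotes("\"price\"", '"') yields "price"; only the first and the last quote
// characters are erased, so quotes in the interior of the string are preserved.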
/**
 * @brief Parses the first row to determine the column names.
 * The first row can be either the header row or the first data row.
*/
std::vector<std::string> setColumnNames(std::vector<char> const &header,
ParseOptions const &opts,
int header_row, std::string prefix) {
std::vector<std::string> col_names;
// If there is only a single character then it would be the terminator
if (header.size() <= 1) {
return col_names;
}
std::vector<char> first_row = header;
int num_cols = 0;
bool quotation = false;
for (size_t pos = 0, prev = 0; pos < first_row.size(); ++pos) {
// Flip the quotation flag if current character is a quotechar
if (first_row[pos] == opts.quotechar) {
quotation = !quotation;
}
// Check if end of a column/row
else if (pos == first_row.size() - 1 ||
(!quotation && first_row[pos] == opts.terminator) ||
(!quotation && first_row[pos] == opts.delimiter)) {
// This is the header, add the column name
if (header_row >= 0) {
// Include the current character, in case the line is not terminated
int col_name_len = pos - prev + 1;
// Exclude the delimiter/terminator is present
if (first_row[pos] == opts.delimiter ||
first_row[pos] == opts.terminator) {
--col_name_len;
}
// Also exclude '\r' character at the end of the column name if it's
// part of the terminator
if (col_name_len > 0 && opts.terminator == '\n' &&
first_row[pos] == '\n' && first_row[pos - 1] == '\r') {
--col_name_len;
}
const string new_col_name(first_row.data() + prev, col_name_len);
col_names.push_back(removeQuotes(new_col_name, opts.quotechar));
// Stop parsing when we hit the line terminator; relevant when there is
// a blank line following the header. In this case, first_row includes
// multiple line terminators at the end, as the new recStart belongs to
// a line that comes after the blank line(s)
if (!quotation && first_row[pos] == opts.terminator) {
break;
}
} else {
// This is the first data row, add the automatically generated name
col_names.push_back(prefix + std::to_string(num_cols));
}
num_cols++;
// Skip adjacent delimiters if delim_whitespace is set
while (opts.multi_delimiter && pos < first_row.size() &&
first_row[pos] == opts.delimiter &&
first_row[pos + 1] == opts.delimiter) {
++pos;
}
prev = pos + 1;
}
}
return col_names;
}
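// Illustrative behaviour (a sketch): for a first row of  name,age,score\n  with
// header_row >= 0 this returns {"name", "age", "score"}; with header_row < 0 and
// prefix "col_" it instead generates the automatic names {"col_0", "col_1", "col_2"}.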
table_with_metadata reader::impl::read(size_t range_offset,
size_t range_size, int skip_rows,
int skip_end_rows, int num_rows,
hipStream_t stream) {
std::vector<std::unique_ptr<column>> out_columns;
table_metadata metadata;
if (range_offset > 0 || range_size > 0) {
CUDF_EXPECTS(compression_type_ == "none",
"Reading compressed data using `byte range` is unsupported");
}
size_t map_range_size = 0;
if (range_size != 0) {
const auto num_columns = std::max(args_.names.size(), args_.dtype.size());
map_range_size = range_size + calculateMaxRowSize(num_columns);
}
// Support delayed opening of the file if using memory mapping datasource
// This allows only mapping of a subset of the file if using byte range
if (source_ == nullptr) {
assert(!filepath_.empty());
source_ = datasource::create(filepath_, range_offset, map_range_size);
}
// Return an empty dataframe if no data and no column metadata to process
if (source_->empty() && (args_.names.empty() || args_.dtype.empty())) {
return { std::make_unique<table>(std::move(out_columns)), std::move(metadata) };
}
// Transfer source data to GPU
if (!source_->empty()) {
const char *h_uncomp_data = nullptr;
size_t h_uncomp_size = 0;
auto data_size = (map_range_size != 0) ? map_range_size : source_->size();
auto buffer = source_->get_buffer(range_offset, data_size);
std::vector<char> h_uncomp_data_owner;
if (compression_type_ == "none") {
// Do not use the owner vector here to avoid extra copy
h_uncomp_data = reinterpret_cast<const char *>(buffer->data());
h_uncomp_size = buffer->size();
} else {
CUDF_EXPECTS(
getUncompressedHostData(
reinterpret_cast<const char *>(buffer->data()), buffer->size(),
compression_type_, h_uncomp_data_owner) == GDF_SUCCESS,
"Cannot decompress data");
h_uncomp_data = h_uncomp_data_owner.data();
h_uncomp_size = h_uncomp_data_owner.size();
}
// None of the row-selection parameters are used; we are parsing the entire file
const bool load_whole_file = range_offset == 0 && range_size == 0 &&
skip_rows == 0 && skip_end_rows == 0 &&
num_rows == -1;
// Preload the input data to the device
if (load_whole_file)
data_ = rmm::device_buffer(h_uncomp_data, h_uncomp_size);
// Pass nullptr for the device data if the data is not preloaded (this will cause additional copies)
gather_row_offsets(h_uncomp_data, h_uncomp_size, range_offset, stream, (load_whole_file ? &data_ : nullptr));
auto row_range = select_rows(h_uncomp_data, h_uncomp_size, range_size,
skip_rows, skip_end_rows, num_rows, stream);
data_size = row_range.second - row_range.first;
CUDF_EXPECTS(data_size <= h_uncomp_size, "Row range exceeds data size");
num_bits = (data_size + 63) / 64;
if (load_whole_file){
// Loaded the whole file, add the start offset (e.g. empty rows) to the pointer
data_ptr = static_cast<char *>(data_.data()) + row_range.first;
}
else{
// The start offset is applied to the device data buffer
data_ = rmm::device_buffer(h_uncomp_data + row_range.first, data_size);
data_ptr = static_cast<char *>(data_.data());
}
}
// Check if the user gave us a list of column names
if (not args_.names.empty()) {
h_column_flags.resize(args_.names.size(), column_parse::enabled);
col_names = args_.names;
} else {
col_names = setColumnNames(header, opts, args_.header, args_.prefix);
num_actual_cols = num_active_cols = col_names.size();
h_column_flags.resize(num_actual_cols, column_parse::enabled);
// Rename empty column names to "Unnamed: col_index"
for (size_t col_idx = 0; col_idx < col_names.size(); ++col_idx) {
if (col_names[col_idx].empty()) {
col_names[col_idx] = string("Unnamed: ") + std::to_string(col_idx);
}
}
// Looking for duplicates
std::unordered_map<string, int> col_names_histogram;
for (auto &col_name : col_names) {
// Operator [] inserts a default-initialized value if the given key is not
// present
if (++col_names_histogram[col_name] > 1) {
if (args_.mangle_dupe_cols) {
// Rename duplicates of column X as X.1, X.2, ...; First appearance
// stays as X
col_name += "." + std::to_string(col_names_histogram[col_name] - 1);
} else {
// All duplicate columns will be ignored; First appearance is parsed
const auto idx = &col_name - col_names.data();
h_column_flags[idx] = column_parse::disabled;
}
}
}
// Update the number of columns to be processed, if some might have been
// removed
if (!args_.mangle_dupe_cols) {
num_active_cols = col_names_histogram.size();
}
}
// User can specify which columns should be parsed
if (!args_.use_cols_indexes.empty() || !args_.use_cols_names.empty()) {
std::fill(h_column_flags.begin(), h_column_flags.end(),
column_parse::disabled);
for (const auto index : args_.use_cols_indexes) {
h_column_flags[index] = column_parse::enabled;
}
num_active_cols = args_.use_cols_indexes.size();
for (const auto name : args_.use_cols_names) {
const auto it = std::find(col_names.begin(), col_names.end(), name);
if (it != col_names.end()) {
h_column_flags[it - col_names.begin()] = column_parse::enabled;
num_active_cols++;
}
}
}
// User can specify which columns should be inferred as datetime
if (!args_.infer_date_indexes.empty() || !args_.infer_date_names.empty()) {
for (const auto index : args_.infer_date_indexes) {
h_column_flags[index] |= column_parse::as_datetime;
}
for (const auto name : args_.infer_date_names) {
auto it = std::find(col_names.begin(), col_names.end(), name);
if (it != col_names.end()) {
h_column_flags[it - col_names.begin()] |= column_parse::as_datetime;
}
}
}
// Return empty table rather than exception if nothing to load
if (num_active_cols == 0) {
return { std::make_unique<table>(std::move(out_columns)), std::move(metadata) };
}
std::vector<data_type> column_types = gather_column_types(stream);
// Alloc output; columns' data memory is still expected for empty dataframe
std::vector<column_buffer> out_buffers;
for (int col = 0, active_col = 0; col < num_actual_cols; ++col) {
if (h_column_flags[col] & column_parse::enabled) {
out_buffers.emplace_back(column_types[active_col], num_records, true,
stream, mr_);
metadata.column_names.emplace_back(col_names[col]);
active_col++;
}
}
if (num_records != 0) {
decode_data(column_types, out_buffers, stream);
}
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(
make_column(column_types[i], num_records, out_buffers[i]));
}
// TODO: String columns need to be reworked to actually copy characters in
// kernel to allow skipping quotation characters
/*for (auto &column : columns) {
column.finalize();
// PANDAS' default behavior of enabling doublequote for two consecutive
// quotechars in quoted fields results in reduction to a single quotechar
if (column->dtype == GDF_STRING &&
(opts.quotechar != '\0' && opts.doublequote == true)) {
const std::string quotechar(1, opts.quotechar);
const std::string dblquotechar(2, opts.quotechar);
auto str_data = static_cast<NVStrings *>(column->data);
column->data = str_data->replace(dblquotechar.c_str(), quotechar.c_str());
NVStrings::destroy(str_data);
}
}*/
return { std::make_unique<table>(std::move(out_columns)), std::move(metadata) };
}
void reader::impl::gather_row_offsets(const char *h_data, size_t h_size,
size_t range_offset,
hipStream_t stream, const rmm::device_buffer* d_data) {
// Account for the start and end of row region offsets
const bool require_first_line_start = (range_offset == 0);
const bool require_last_line_end = (h_data[h_size - 1] != opts.terminator);
auto symbols = (opts.quotechar != '\0')
? std::vector<char>{opts.terminator, opts.quotechar}
: std::vector<char>{opts.terminator};
cudf::size_type num_rows = (require_first_line_start ? 1 : 0);
if (d_data){
// preloaded to device memory
num_rows += count_all_from_set(*d_data, symbols);
}
else{
num_rows += count_all_from_set(h_data, h_size, symbols);
}
const auto num_offsets = num_rows + (require_last_line_end ? 1 : 0);
row_offsets.resize(num_offsets);
auto ptr_first = row_offsets.data().get();
auto ptr_last = ptr_first + num_rows;
if (require_first_line_start) {
ptr_first++;
const uint64_t first_entry = 0;
row_offsets.front() = first_entry;
}
if (require_last_line_end) {
const uint64_t last_entry = h_size;
row_offsets.back() = last_entry;
}
// Passing offset = 1 to return positions AFTER the found character
if (d_data){
find_all_from_set(*d_data, symbols, 1, ptr_first);
}
else{
find_all_from_set(h_data, h_size, symbols, 1, ptr_first);
}
// Sort the row info according to ascending start offset
// Subsequent processing (filtering, etc.) may require row order
thrust::sort(rmm::exec_policy(stream)->on(stream), ptr_first, ptr_last);
}
std::pair<uint64_t, uint64_t> reader::impl::select_rows(
const char *h_data, size_t h_size, size_t range_size,
cudf::size_type skip_rows, cudf::size_type skip_end_rows,
cudf::size_type num_rows, hipStream_t stream) {
thrust::host_vector<uint64_t> h_row_offsets = row_offsets;
auto it_begin = h_row_offsets.begin();
auto it_end = h_row_offsets.end();
assert(std::distance(it_begin, it_end) >= 1);
// Currently, ignoring line terminations within quotes is handled by recording
// the positions of both, and then filtering out the records that are a quotechar
// or a line termination within a quotechar pair.
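// For example (illustrative only): in  a,"x\ny",b\n  the offsets recorded just after each
// quotechar and after the \n inside the quoted field are marked as -1 and dropped below,
// so only the offset following the row's terminating \n survives the filtering.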
if (opts.quotechar != '\0') {
auto count = std::distance(it_begin, it_end) - 1;
// First element is zero if reading from start of file, skip it in that case
// Check the first element otherwise, it could be a quotation
const int start = (h_row_offsets[0] == 0) ? 1 : 0;
// Starting in the incomplete first row (before first line terminator in the byte range)?
bool is_partial_row = (h_row_offsets[0] != 0);
auto filtered_count = count;
bool quotation = false;
for (int i = start; i < count; ++i) {
auto& offset = h_row_offsets[i];
if (offset > 0 && h_data[offset - 1] == opts.quotechar) {
// Don't update the quotation state before hitting the first line terminator
if (!is_partial_row) {
quotation = !quotation;
}
offset = static_cast<uint64_t>(-1);
filtered_count--;
} else if (offset > 0 && h_data[offset - 1] == opts.terminator) {
if (quotation){
offset = static_cast<uint64_t>(-1);
filtered_count--;
}
else if (is_partial_row){
// Hit the first line terminator; reset the is_partial_row flag
is_partial_row = false;
}
}
}
if (filtered_count != count) {
it_end = std::remove_if(it_begin, it_end, [](uint64_t pos) {
return (pos == static_cast<uint64_t>(-1));
});
}
}
// Exclude the rows that are to be skipped from the start
if (skip_rows != 0 && skip_rows < std::distance(it_begin, it_end)) {
it_begin += skip_rows;
}
// Exclude the rows outside of requested range
if (range_size != 0) {
auto it = it_end - 1;
while (it >= it_begin && *it > static_cast<uint64_t>(range_size)) {
--it;
}
if ((it + 2) < it_end) {
it_end = it + 2;
}
}
// Exclude the rows without data
if (opts.skipblanklines || opts.comment != '\0') {
const auto newline = opts.skipblanklines ? opts.terminator : opts.comment;
const auto comment = opts.comment != '\0' ? opts.comment : newline;
const auto carriage =
(opts.skipblanklines && opts.terminator == '\n') ? '\r' : comment;
it_end = std::remove_if(it_begin, it_end, [=, &h_data](uint64_t pos) {
return ((pos != h_size) &&
(h_data[pos] == newline || h_data[pos] == comment ||
h_data[pos] == carriage));
});
}
// Exclude the rows before the header row (inclusive)
if (std::distance(it_begin, it_end) > 1) {
if (args_.header == -1) {
header.assign(h_data + *(it_begin), h_data + *(it_begin + 1));
} else {
header.assign(h_data + *(it_begin + args_.header),
h_data + *(it_begin + args_.header + 1));
it_begin += args_.header + 1;
}
}
// Exclude the rows that exceed past the requested number
if (num_rows >= 0 && num_rows < std::distance(it_begin, it_end)) {
it_end = it_begin + num_rows + 1;
}
// Exclude the rows that are to be skipped from the end
if (skip_end_rows != 0 && skip_end_rows < std::distance(it_begin, it_end)) {
it_end -= skip_end_rows;
}
const uint64_t offset_start = *it_begin;
const uint64_t offset_end = *(it_end - 1);
// Copy out the row starts to use for row-column data parsing
if (offset_start != offset_end) {
if (offset_start != 0) {
for (auto it = it_begin; it != it_end; ++it) {
*it -= offset_start;
}
}
CUDA_TRY(hipMemcpyAsync(row_offsets.data().get(), &(*it_begin),
std::distance(it_begin, it_end) * sizeof(uint64_t),
hipMemcpyHostToDevice, stream));
// Exclude the end-of-data row from number of rows with actual data
num_records = std::distance(it_begin, it_end) - 1;
}
return std::make_pair(offset_start, offset_end);
}
std::vector<data_type> reader::impl::gather_column_types(hipStream_t stream) {
std::vector<data_type> dtypes;
if (args_.dtype.empty()) {
if (num_records == 0) {
dtypes.resize(num_active_cols, data_type{EMPTY});
} else {
d_column_flags = h_column_flags;
hostdevice_vector<column_parse::stats> column_stats(num_active_cols);
CUDA_TRY(hipMemsetAsync(column_stats.device_ptr(), 0,
column_stats.memory_size(), stream));
CUDA_TRY(cudf::io::csv::gpu::DetectColumnTypes(
data_ptr, row_offsets.data().get(),
num_records, num_actual_cols, opts, d_column_flags.data().get(),
column_stats.device_ptr(), stream));
CUDA_TRY(hipMemcpyAsync(
column_stats.host_ptr(), column_stats.device_ptr(),
column_stats.memory_size(), hipMemcpyDeviceToHost, stream));
CUDA_TRY(hipStreamSynchronize(stream));
for (int col = 0; col < num_active_cols; col++) {
unsigned long long countInt =
column_stats[col].countInt8 + column_stats[col].countInt16 +
column_stats[col].countInt32 + column_stats[col].countInt64;
if (column_stats[col].countNULL == num_records) {
// Entire column is NULL; allocate the smallest amount of memory
dtypes.emplace_back(cudf::type_id::INT8);
} else if (column_stats[col].countString > 0L) {
dtypes.emplace_back(cudf::type_id::STRING);
} else if (column_stats[col].countDateAndTime > 0L) {
dtypes.emplace_back(cudf::type_id::TIMESTAMP_NANOSECONDS);
} else if (column_stats[col].countBool > 0L) {
dtypes.emplace_back(cudf::type_id::BOOL8);
} else if (column_stats[col].countFloat > 0L ||
(column_stats[col].countFloat == 0L && countInt > 0L &&
column_stats[col].countNULL > 0L)) {
// The second condition has been added to conform to
// PANDAS which states that a column of integers with
// a single NULL record needs to be treated as floats.
dtypes.emplace_back(cudf::type_id::FLOAT64);
} else {
// All other integers are stored as 64-bit to conform to PANDAS
dtypes.emplace_back(cudf::type_id::INT64);
}
}
}
} else {
const bool is_dict = std::all_of(
args_.dtype.begin(), args_.dtype.end(),
[](const auto &s) { return s.find(':') != std::string::npos; });
if (!is_dict) {
if (args_.dtype.size() == 1) {
// If it's a single dtype, assign that dtype to all active columns
data_type dtype_;
column_parse::flags col_flags_;
std::tie(dtype_, col_flags_) = get_dtype_info(args_.dtype[0]);
dtypes.resize(num_active_cols, dtype_);
for (int col = 0; col < num_actual_cols; col++) {
h_column_flags[col] |= col_flags_;
}
CUDF_EXPECTS(dtypes.back().id() != cudf::type_id::EMPTY,
"Unsupported data type");
} else {
// If it's a list, assign dtypes to active columns in the given order
CUDF_EXPECTS(static_cast<int>(args_.dtype.size()) >= num_actual_cols,
"Must specify data types for all columns");
auto dtype_ = std::back_inserter(dtypes);
for (int col = 0; col < num_actual_cols; col++) {
if (h_column_flags[col] & column_parse::enabled) {
column_parse::flags col_flags_;
std::tie(dtype_, col_flags_) = get_dtype_info(args_.dtype[col]);
h_column_flags[col] |= col_flags_;
CUDF_EXPECTS(dtypes.back().id() != cudf::type_id::EMPTY,
"Unsupported data type");
}
}
}
} else {
// Translate vector of `name : dtype` strings to map
// NOTE: Incoming pairs can be out-of-order from column names in dataset
std::unordered_map<std::string, std::string> col_type_map;
for (const auto &pair : args_.dtype) {
const auto pos = pair.find_last_of(':');
const auto name = pair.substr(0, pos);
const auto dtype = pair.substr(pos + 1, pair.size());
col_type_map[name] = dtype;
}
auto dtype_ = std::back_inserter(dtypes);
for (int col = 0; col < num_actual_cols; col++) {
if (h_column_flags[col] & column_parse::enabled) {
CUDF_EXPECTS(col_type_map.find(col_names[col]) != col_type_map.end(),
"Must specify data types for all active columns");
column_parse::flags col_flags_;
std::tie(dtype_, col_flags_) =
get_dtype_info(col_type_map[col_names[col]]);
h_column_flags[col] |= col_flags_;
CUDF_EXPECTS(dtypes.back().id() != cudf::type_id::EMPTY,
"Unsupported data type");
}
}
}
}
if (args_.timestamp_type.id() != cudf::type_id::EMPTY) {
for (auto &type : dtypes) {
if (cudf::is_timestamp(type)) {
type = args_.timestamp_type;
}
}
}
return dtypes;
}
void reader::impl::decode_data(const std::vector<data_type> &column_types,
std::vector<column_buffer> &out_buffers,
hipStream_t stream) {
thrust::host_vector<void *> h_data(num_active_cols);
thrust::host_vector<bitmask_type *> h_valid(num_active_cols);
for (int i = 0; i < num_active_cols; ++i) {
h_data[i] = out_buffers[i].data();
h_valid[i] = out_buffers[i].null_mask();
}
rmm::device_vector<data_type> d_dtypes(column_types);
rmm::device_vector<void *> d_data = h_data;
rmm::device_vector<bitmask_type *> d_valid = h_valid;
d_column_flags = h_column_flags;
CUDA_TRY(cudf::io::csv::gpu::DecodeRowColumnData(
data_ptr, row_offsets.data().get(),
num_records, num_actual_cols, opts, d_column_flags.data().get(),
d_dtypes.data().get(), d_data.data().get(), d_valid.data().get(),
stream));
CUDA_TRY(hipStreamSynchronize(stream));
for (int i = 0; i < num_active_cols; ++i) {
out_buffers[i].null_count() = UNKNOWN_NULL_COUNT;
}
}
reader::impl::impl(std::unique_ptr<datasource> source, std::string filepath,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: source_(std::move(source)), mr_(mr), filepath_(filepath), args_(options) {
num_actual_cols = args_.names.size();
num_active_cols = args_.names.size();
if (args_.delim_whitespace) {
opts.delimiter = ' ';
opts.multi_delimiter = true;
} else {
opts.delimiter = args_.delimiter;
opts.multi_delimiter = false;
}
opts.terminator = args_.lineterminator;
if (args_.quotechar != '\0' && args_.quoting != quote_style::NONE) {
opts.quotechar = args_.quotechar;
opts.keepquotes = false;
opts.doublequote = args_.doublequote;
} else {
opts.quotechar = '\0';
opts.keepquotes = true;
opts.doublequote = false;
}
opts.skipblanklines = args_.skip_blank_lines;
opts.comment = args_.comment;
opts.dayfirst = args_.dayfirst;
opts.decimal = args_.decimal;
opts.thousands = args_.thousands;
CUDF_EXPECTS(opts.decimal != opts.delimiter,
"Decimal point cannot be the same as the delimiter");
CUDF_EXPECTS(opts.thousands != opts.delimiter,
"Thousands separator cannot be the same as the delimiter");
compression_type_ = infer_compression_type(
args_.compression, filepath,
{{"gz", "gzip"}, {"zip", "zip"}, {"bz2", "bz2"}, {"xz", "xz"}});
// Handle user-defined true values, whereby field data is substituted with a
// boolean true or numeric `1` value
if (args_.true_values.size() != 0) {
d_trueTrie = createSerializedTrie(args_.true_values);
opts.trueValuesTrie = d_trueTrie.data().get();
}
// Handle user-defined false values, whereby field data is substituted with a
// boolean false or numeric `0` value
if (args_.false_values.size() != 0) {
d_falseTrie = createSerializedTrie(args_.false_values);
opts.falseValuesTrie = d_falseTrie.data().get();
}
// Handle user-defined N/A values, whereby field data is treated as null
if (args_.na_values.size() != 0) {
d_naTrie = createSerializedTrie(args_.na_values);
opts.naValuesTrie = d_naTrie.data().get();
}
}
// Forward to implementation
reader::reader(std::string filepath, reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(nullptr, filepath, options, mr)) {
// Delay actual instantiation of data source until read to allow for
// partial memory mapping of file using byte ranges
}
// Forward to implementation
reader::reader(const char *buffer, size_t length, reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(datasource::create(buffer, length), "",
options, mr)) {}
// Forward to implementation
reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(datasource::create(file), "", options, mr)) {
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read_all(hipStream_t stream) {
return _impl->read(0, 0, 0, 0, -1, stream);
}
// Forward to implementation
table_with_metadata reader::read_byte_range(size_t offset, size_t size,
hipStream_t stream) {
return _impl->read(offset, size, 0, 0, -1, stream);
}
// Forward to implementation
table_with_metadata reader::read_rows(size_type num_skip_header,
size_type num_skip_footer,
size_type num_rows,
hipStream_t stream) {
CUDF_EXPECTS(num_rows == -1 || num_skip_footer == 0,
"Cannot use both `num_rows` and `num_skip_footer`");
return _impl->read(0, 0, num_skip_header, num_skip_footer, num_rows, stream);
}
} // namespace csv
} // namespace detail
} // namespace io
} // namespace experimental
} // namespace cudf
| 3f13240cfd9111aa63e7c4578c272e0537c9fe65.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO CSV reader class implementation
**/
#include "reader_impl.hpp"
#include <algorithm>
#include <iostream>
#include <numeric>
#include <tuple>
#include <unordered_map>
#include "legacy/datetime_parser.cuh"
#include "legacy/type_conversion.cuh"
#include <utilities/legacy/cudf_utils.h>
#include <cudf/legacy/unary.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <nvstrings/NVStrings.h>
#include <io/comp/io_uncomp.h>
#include <io/utilities/parsing_utils.cuh>
#include <io/utilities/type_conversion.cuh>
using std::string;
using std::vector;
namespace cudf {
namespace experimental {
namespace io {
namespace detail {
namespace csv {
using namespace cudf::io::csv;
using namespace cudf::io;
/**---------------------------------------------------------------------------*
* @brief Estimates the maximum expected length of a row, based on the number
* of columns
*
* If the number of columns is not available, it will return a value large
* enough for most use cases
*
* @param[in] num_columns Number of columns in the CSV file (optional)
*
* @return Estimated maximum size of a row, in bytes
*---------------------------------------------------------------------------**/
constexpr size_t calculateMaxRowSize(int num_columns = 0) noexcept {
constexpr size_t max_row_bytes = 16 * 1024; // 16KB
constexpr size_t column_bytes = 64;
constexpr size_t base_padding = 1024; // 1KB
if (num_columns == 0) {
// Use flat size if the number of columns is not known
return max_row_bytes;
} else {
// Expand the size based on the number of columns, if available
return base_padding + num_columns * column_bytes;
}
}
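// A quick worked example (assuming the constants above): a 10-column CSV yields
// base_padding + 10 * column_bytes = 1024 + 640 = 1664 bytes per row, while an
// unknown column count falls back to the flat 16 KB bound.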
/**
* @brief Translates a dtype string and returns its dtype enumeration and any
* extended dtype flags that are supported by cuIO. Often, this is a column
* with the same underlying dtype as the basic types, but with different parsing
* interpretations.
*
* @param[in] dtype String containing the basic or extended dtype
*
* @return std::tuple<data_type, column_parse::flags> Tuple of dtype and flags
*/
std::tuple<data_type, column_parse::flags> get_dtype_info(
const std::string &dtype) {
if (dtype == "hex" || dtype == "hex64") {
return std::make_tuple(data_type{cudf::type_id::INT64},
column_parse::as_hexadecimal);
}
if (dtype == "hex32") {
return std::make_tuple(data_type{cudf::type_id::INT32},
column_parse::as_hexadecimal);
}
return std::make_tuple(convert_string_to_dtype(dtype), column_parse::as_default);
}
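// Example usage (illustrative; names accepted on the non-hex path are whatever
// convert_string_to_dtype() recognizes):
//   data_type t; column_parse::flags f;
//   std::tie(t, f) = get_dtype_info("hex32"); // -> INT32, parsed as hexadecimal
//   std::tie(t, f) = get_dtype_info("int64"); // -> INT64, column_parse::as_default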
/**
* @brief Removes the first and Last quote in the string
*/
string removeQuotes(string str, char quotechar) {
// Exclude first and last quotation char
const size_t first_quote = str.find(quotechar);
if (first_quote != string::npos) {
str.erase(first_quote, 1);
}
const size_t last_quote = str.rfind(quotechar);
if (last_quote != string::npos) {
str.erase(last_quote, 1);
}
return str;
}
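// Example (illustrative): removeQuotes("\"price\"", '"') yields "price"; only the
// first and last quotechar occurrences are erased, so quotes embedded in the middle
// of the string are preserved.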
/**
* @brief Parse the first row to set the column names in the raw_csv parameter.
* The first row can be either the header row, or the first data row
*/
std::vector<std::string> setColumnNames(std::vector<char> const &header,
ParseOptions const &opts,
int header_row, std::string prefix) {
std::vector<std::string> col_names;
// If there is only a single character then it would be the terminator
if (header.size() <= 1) {
return col_names;
}
std::vector<char> first_row = header;
int num_cols = 0;
bool quotation = false;
for (size_t pos = 0, prev = 0; pos < first_row.size(); ++pos) {
// Flip the quotation flag if current character is a quotechar
if (first_row[pos] == opts.quotechar) {
quotation = !quotation;
}
// Check if end of a column/row
else if (pos == first_row.size() - 1 ||
(!quotation && first_row[pos] == opts.terminator) ||
(!quotation && first_row[pos] == opts.delimiter)) {
// This is the header, add the column name
if (header_row >= 0) {
// Include the current character, in case the line is not terminated
int col_name_len = pos - prev + 1;
// Exclude the delimiter/terminator is present
if (first_row[pos] == opts.delimiter ||
first_row[pos] == opts.terminator) {
--col_name_len;
}
// Also exclude '\r' character at the end of the column name if it's
// part of the terminator
if (col_name_len > 0 && opts.terminator == '\n' &&
first_row[pos] == '\n' && first_row[pos - 1] == '\r') {
--col_name_len;
}
const string new_col_name(first_row.data() + prev, col_name_len);
col_names.push_back(removeQuotes(new_col_name, opts.quotechar));
// Stop parsing when we hit the line terminator; relevant when there is
// a blank line following the header. In this case, first_row includes
// multiple line terminators at the end, as the new recStart belongs to
// a line that comes after the blank line(s)
if (!quotation && first_row[pos] == opts.terminator) {
break;
}
} else {
// This is the first data row, add the automatically generated name
col_names.push_back(prefix + std::to_string(num_cols));
}
num_cols++;
// Skip adjacent delimiters if delim_whitespace is set
while (opts.multi_delimiter && pos < first_row.size() &&
first_row[pos] == opts.delimiter &&
first_row[pos + 1] == opts.delimiter) {
++pos;
}
prev = pos + 1;
}
}
return col_names;
}
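// Example (illustrative): with opts.delimiter == ',' and opts.terminator == '\n',
// the header bytes a,b,"c"\n and header_row == 0 produce {"a", "b", "c"}, whereas
// header_row == -1 with prefix "col_" produces the generated names
// {"col_0", "col_1", "col_2"}.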
table_with_metadata reader::impl::read(size_t range_offset,
size_t range_size, int skip_rows,
int skip_end_rows, int num_rows,
cudaStream_t stream) {
std::vector<std::unique_ptr<column>> out_columns;
table_metadata metadata;
if (range_offset > 0 || range_size > 0) {
CUDF_EXPECTS(compression_type_ == "none",
"Reading compressed data using `byte range` is unsupported");
}
size_t map_range_size = 0;
if (range_size != 0) {
const auto num_columns = std::max(args_.names.size(), args_.dtype.size());
map_range_size = range_size + calculateMaxRowSize(num_columns);
}
// Support delayed opening of the file if using memory mapping datasource
// This allows only mapping of a subset of the file if using byte range
if (source_ == nullptr) {
assert(!filepath_.empty());
source_ = datasource::create(filepath_, range_offset, map_range_size);
}
// Return an empty dataframe if no data and no column metadata to process
if (source_->empty() && (args_.names.empty() || args_.dtype.empty())) {
return { std::make_unique<table>(std::move(out_columns)), std::move(metadata) };
}
// Transfer source data to GPU
if (!source_->empty()) {
const char *h_uncomp_data = nullptr;
size_t h_uncomp_size = 0;
auto data_size = (map_range_size != 0) ? map_range_size : source_->size();
auto buffer = source_->get_buffer(range_offset, data_size);
std::vector<char> h_uncomp_data_owner;
if (compression_type_ == "none") {
// Do not use the owner vector here to avoid extra copy
h_uncomp_data = reinterpret_cast<const char *>(buffer->data());
h_uncomp_size = buffer->size();
} else {
CUDF_EXPECTS(
getUncompressedHostData(
reinterpret_cast<const char *>(buffer->data()), buffer->size(),
compression_type_, h_uncomp_data_owner) == GDF_SUCCESS,
"Cannot decompress data");
h_uncomp_data = h_uncomp_data_owner.data();
h_uncomp_size = h_uncomp_data_owner.size();
}
// None of the parameters for row selection is used; we are parsing the entire file
const bool load_whole_file = range_offset == 0 && range_size == 0 &&
skip_rows == 0 && skip_end_rows == 0 &&
num_rows == -1;
// Preload the input data to the device
if (load_whole_file)
data_ = rmm::device_buffer(h_uncomp_data, h_uncomp_size);
// Pass nullptr for the device data if the data is not preloaded (will cause additional copies)
gather_row_offsets(h_uncomp_data, h_uncomp_size, range_offset, stream, (load_whole_file ? &data_ : nullptr));
auto row_range = select_rows(h_uncomp_data, h_uncomp_size, range_size,
skip_rows, skip_end_rows, num_rows, stream);
data_size = row_range.second - row_range.first;
CUDF_EXPECTS(data_size <= h_uncomp_size, "Row range exceeds data size");
num_bits = (data_size + 63) / 64;
if (load_whole_file){
// Loaded the whole file, add the start offset (e.g. empty rows) to the pointer
data_ptr = static_cast<char *>(data_.data()) + row_range.first;
}
else{
// The start offset is applied to the device data buffer
data_ = rmm::device_buffer(h_uncomp_data + row_range.first, data_size);
data_ptr = static_cast<char *>(data_.data());
}
}
// Check if the user gave us a list of column names
if (not args_.names.empty()) {
h_column_flags.resize(args_.names.size(), column_parse::enabled);
col_names = args_.names;
} else {
col_names = setColumnNames(header, opts, args_.header, args_.prefix);
num_actual_cols = num_active_cols = col_names.size();
h_column_flags.resize(num_actual_cols, column_parse::enabled);
// Rename empty column names to "Unnamed: col_index"
for (size_t col_idx = 0; col_idx < col_names.size(); ++col_idx) {
if (col_names[col_idx].empty()) {
col_names[col_idx] = string("Unnamed: ") + std::to_string(col_idx);
}
}
// Looking for duplicates
std::unordered_map<string, int> col_names_histogram;
for (auto &col_name : col_names) {
// Operator [] inserts a default-initialized value if the given key is not
// present
if (++col_names_histogram[col_name] > 1) {
if (args_.mangle_dupe_cols) {
// Rename duplicates of column X as X.1, X.2, ...; First appearance
// stays as X
col_name += "." + std::to_string(col_names_histogram[col_name] - 1);
} else {
// All duplicate columns will be ignored; First appearance is parsed
const auto idx = &col_name - col_names.data();
h_column_flags[idx] = column_parse::disabled;
}
}
}
// Update the number of columns to be processed, if some might have been
// removed
if (!args_.mangle_dupe_cols) {
num_active_cols = col_names_histogram.size();
}
}
// User can specify which columns should be parsed
if (!args_.use_cols_indexes.empty() || !args_.use_cols_names.empty()) {
std::fill(h_column_flags.begin(), h_column_flags.end(),
column_parse::disabled);
for (const auto index : args_.use_cols_indexes) {
h_column_flags[index] = column_parse::enabled;
}
num_active_cols = args_.use_cols_indexes.size();
for (const auto name : args_.use_cols_names) {
const auto it = std::find(col_names.begin(), col_names.end(), name);
if (it != col_names.end()) {
h_column_flags[it - col_names.begin()] = column_parse::enabled;
num_active_cols++;
}
}
}
// User can specify which columns should be inferred as datetime
if (!args_.infer_date_indexes.empty() || !args_.infer_date_names.empty()) {
for (const auto index : args_.infer_date_indexes) {
h_column_flags[index] |= column_parse::as_datetime;
}
for (const auto name : args_.infer_date_names) {
auto it = std::find(col_names.begin(), col_names.end(), name);
if (it != col_names.end()) {
h_column_flags[it - col_names.begin()] |= column_parse::as_datetime;
}
}
}
// Return empty table rather than exception if nothing to load
if (num_active_cols == 0) {
return { std::make_unique<table>(std::move(out_columns)), std::move(metadata) };
}
std::vector<data_type> column_types = gather_column_types(stream);
// Alloc output; columns' data memory is still expected for empty dataframe
std::vector<column_buffer> out_buffers;
for (int col = 0, active_col = 0; col < num_actual_cols; ++col) {
if (h_column_flags[col] & column_parse::enabled) {
out_buffers.emplace_back(column_types[active_col], num_records, true,
stream, mr_);
metadata.column_names.emplace_back(col_names[col]);
active_col++;
}
}
if (num_records != 0) {
decode_data(column_types, out_buffers, stream);
}
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(
make_column(column_types[i], num_records, out_buffers[i]));
}
// TODO: String columns need to be reworked to actually copy characters in
// kernel to allow skipping quotation characters
/*for (auto &column : columns) {
column.finalize();
// PANDAS' default behavior of enabling doublequote for two consecutive
// quotechars in quoted fields results in reduction to a single quotechar
if (column->dtype == GDF_STRING &&
(opts.quotechar != '\0' && opts.doublequote == true)) {
const std::string quotechar(1, opts.quotechar);
const std::string dblquotechar(2, opts.quotechar);
auto str_data = static_cast<NVStrings *>(column->data);
column->data = str_data->replace(dblquotechar.c_str(), quotechar.c_str());
NVStrings::destroy(str_data);
}
}*/
return { std::make_unique<table>(std::move(out_columns)), std::move(metadata) };
}
void reader::impl::gather_row_offsets(const char *h_data, size_t h_size,
size_t range_offset,
cudaStream_t stream, const rmm::device_buffer* d_data) {
// Account for the start and end of row region offsets
const bool require_first_line_start = (range_offset == 0);
const bool require_last_line_end = (h_data[h_size - 1] != opts.terminator);
auto symbols = (opts.quotechar != '\0')
? std::vector<char>{opts.terminator, opts.quotechar}
: std::vector<char>{opts.terminator};
cudf::size_type num_rows = (require_first_line_start ? 1 : 0);
if (d_data){
// preloaded to device memory
num_rows += count_all_from_set(*d_data, symbols);
}
else{
num_rows += count_all_from_set(h_data, h_size, symbols);
}
const auto num_offsets = num_rows + (require_last_line_end ? 1 : 0);
row_offsets.resize(num_offsets);
auto ptr_first = row_offsets.data().get();
auto ptr_last = ptr_first + num_rows;
if (require_first_line_start) {
ptr_first++;
const uint64_t first_entry = 0;
row_offsets.front() = first_entry;
}
if (require_last_line_end) {
const uint64_t last_entry = h_size;
row_offsets.back() = last_entry;
}
// Passing offset = 1 to return positions AFTER the found character
if (d_data){
find_all_from_set(*d_data, symbols, 1, ptr_first);
}
else{
find_all_from_set(h_data, h_size, symbols, 1, ptr_first);
}
// Sort the row info according to ascending start offset
// Subsequent processing (filtering, etc.) may require row order
thrust::sort(rmm::exec_policy(stream)->on(stream), ptr_first, ptr_last);
}
std::pair<uint64_t, uint64_t> reader::impl::select_rows(
const char *h_data, size_t h_size, size_t range_size,
cudf::size_type skip_rows, cudf::size_type skip_end_rows,
cudf::size_type num_rows, cudaStream_t stream) {
thrust::host_vector<uint64_t> h_row_offsets = row_offsets;
auto it_begin = h_row_offsets.begin();
auto it_end = h_row_offsets.end();
assert(std::distance(it_begin, it_end) >= 1);
// Currently, ignoring line terminations within quotes is handled by recording
// the records of both, and then filtering out the records that are a quotechar
// or a line termination within a quotechar pair.
if (opts.quotechar != '\0') {
auto count = std::distance(it_begin, it_end) - 1;
// First element is zero if reading from start of file, skip it in that case
// Otherwise check the first element; it could be a quotation
const int start = (h_row_offsets[0] == 0) ? 1 : 0;
// Starting in the incomplete first row (before first line terminator in the byte range)?
bool is_partial_row = (h_row_offsets[0] != 0);
auto filtered_count = count;
bool quotation = false;
for (int i = start; i < count; ++i) {
auto& offset = h_row_offsets[i];
if (offset > 0 && h_data[offset - 1] == opts.quotechar) {
// Don't update the quotation state before hitting the first line terminator
if (!is_partial_row) {
quotation = !quotation;
}
offset = static_cast<uint64_t>(-1);
filtered_count--;
} else if (offset > 0 && h_data[offset - 1] == opts.terminator) {
if (quotation){
offset = static_cast<uint64_t>(-1);
filtered_count--;
}
else if (is_partial_row){
// Hit the first line terminator; reset the is_partial_row flag
is_partial_row = false;
}
}
}
if (filtered_count != count) {
it_end = std::remove_if(it_begin, it_end, [](uint64_t pos) {
return (pos == static_cast<uint64_t>(-1));
});
}
}
// Exclude the rows that are to be skipped from the start
if (skip_rows != 0 && skip_rows < std::distance(it_begin, it_end)) {
it_begin += skip_rows;
}
// Exclude the rows outside of requested range
if (range_size != 0) {
auto it = it_end - 1;
while (it >= it_begin && *it > static_cast<uint64_t>(range_size)) {
--it;
}
if ((it + 2) < it_end) {
it_end = it + 2;
}
}
// Exclude the rows without data
if (opts.skipblanklines || opts.comment != '\0') {
const auto newline = opts.skipblanklines ? opts.terminator : opts.comment;
const auto comment = opts.comment != '\0' ? opts.comment : newline;
const auto carriage =
(opts.skipblanklines && opts.terminator == '\n') ? '\r' : comment;
it_end = std::remove_if(it_begin, it_end, [=, &h_data](uint64_t pos) {
return ((pos != h_size) &&
(h_data[pos] == newline || h_data[pos] == comment ||
h_data[pos] == carriage));
});
}
// Exclude the rows before the header row (inclusive)
if (std::distance(it_begin, it_end) > 1) {
if (args_.header == -1) {
header.assign(h_data + *(it_begin), h_data + *(it_begin + 1));
} else {
header.assign(h_data + *(it_begin + args_.header),
h_data + *(it_begin + args_.header + 1));
it_begin += args_.header + 1;
}
}
// Exclude the rows that exceed past the requested number
if (num_rows >= 0 && num_rows < std::distance(it_begin, it_end)) {
it_end = it_begin + num_rows + 1;
}
// Exclude the rows that are to be skipped from the end
if (skip_end_rows != 0 && skip_end_rows < std::distance(it_begin, it_end)) {
it_end -= skip_end_rows;
}
const uint64_t offset_start = *it_begin;
const uint64_t offset_end = *(it_end - 1);
// Copy out the row starts to use for row-column data parsing
if (offset_start != offset_end) {
if (offset_start != 0) {
for (auto it = it_begin; it != it_end; ++it) {
*it -= offset_start;
}
}
CUDA_TRY(cudaMemcpyAsync(row_offsets.data().get(), &(*it_begin),
std::distance(it_begin, it_end) * sizeof(uint64_t),
cudaMemcpyHostToDevice, stream));
// Exclude the end-of-data row from number of rows with actual data
num_records = std::distance(it_begin, it_end) - 1;
}
return std::make_pair(offset_start, offset_end);
}
std::vector<data_type> reader::impl::gather_column_types(cudaStream_t stream) {
std::vector<data_type> dtypes;
if (args_.dtype.empty()) {
if (num_records == 0) {
dtypes.resize(num_active_cols, data_type{EMPTY});
} else {
d_column_flags = h_column_flags;
hostdevice_vector<column_parse::stats> column_stats(num_active_cols);
CUDA_TRY(cudaMemsetAsync(column_stats.device_ptr(), 0,
column_stats.memory_size(), stream));
CUDA_TRY(cudf::io::csv::gpu::DetectColumnTypes(
data_ptr, row_offsets.data().get(),
num_records, num_actual_cols, opts, d_column_flags.data().get(),
column_stats.device_ptr(), stream));
CUDA_TRY(cudaMemcpyAsync(
column_stats.host_ptr(), column_stats.device_ptr(),
column_stats.memory_size(), cudaMemcpyDeviceToHost, stream));
CUDA_TRY(cudaStreamSynchronize(stream));
for (int col = 0; col < num_active_cols; col++) {
unsigned long long countInt =
column_stats[col].countInt8 + column_stats[col].countInt16 +
column_stats[col].countInt32 + column_stats[col].countInt64;
if (column_stats[col].countNULL == num_records) {
// Entire column is NULL; allocate the smallest amount of memory
dtypes.emplace_back(cudf::type_id::INT8);
} else if (column_stats[col].countString > 0L) {
dtypes.emplace_back(cudf::type_id::STRING);
} else if (column_stats[col].countDateAndTime > 0L) {
dtypes.emplace_back(cudf::type_id::TIMESTAMP_NANOSECONDS);
} else if (column_stats[col].countBool > 0L) {
dtypes.emplace_back(cudf::type_id::BOOL8);
} else if (column_stats[col].countFloat > 0L ||
(column_stats[col].countFloat == 0L && countInt > 0L &&
column_stats[col].countNULL > 0L)) {
// The second condition has been added to conform to
// PANDAS which states that a column of integers with
// a single NULL record needs to be treated as floats.
dtypes.emplace_back(cudf::type_id::FLOAT64);
} else {
// All other integers are stored as 64-bit to conform to PANDAS
dtypes.emplace_back(cudf::type_id::INT64);
}
}
}
} else {
const bool is_dict = std::all_of(
args_.dtype.begin(), args_.dtype.end(),
[](const auto &s) { return s.find(':') != std::string::npos; });
if (!is_dict) {
if (args_.dtype.size() == 1) {
// If it's a single dtype, assign that dtype to all active columns
data_type dtype_;
column_parse::flags col_flags_;
std::tie(dtype_, col_flags_) = get_dtype_info(args_.dtype[0]);
dtypes.resize(num_active_cols, dtype_);
for (int col = 0; col < num_actual_cols; col++) {
h_column_flags[col] |= col_flags_;
}
CUDF_EXPECTS(dtypes.back().id() != cudf::type_id::EMPTY,
"Unsupported data type");
} else {
// If it's a list, assign dtypes to active columns in the given order
CUDF_EXPECTS(static_cast<int>(args_.dtype.size()) >= num_actual_cols,
"Must specify data types for all columns");
auto dtype_ = std::back_inserter(dtypes);
for (int col = 0; col < num_actual_cols; col++) {
if (h_column_flags[col] & column_parse::enabled) {
column_parse::flags col_flags_;
std::tie(dtype_, col_flags_) = get_dtype_info(args_.dtype[col]);
h_column_flags[col] |= col_flags_;
CUDF_EXPECTS(dtypes.back().id() != cudf::type_id::EMPTY,
"Unsupported data type");
}
}
}
} else {
// Translate vector of `name : dtype` strings to map
// NOTE: Incoming pairs can be out-of-order from column names in dataset
std::unordered_map<std::string, std::string> col_type_map;
for (const auto &pair : args_.dtype) {
const auto pos = pair.find_last_of(':');
const auto name = pair.substr(0, pos);
const auto dtype = pair.substr(pos + 1, pair.size());
col_type_map[name] = dtype;
}
auto dtype_ = std::back_inserter(dtypes);
for (int col = 0; col < num_actual_cols; col++) {
if (h_column_flags[col] & column_parse::enabled) {
CUDF_EXPECTS(col_type_map.find(col_names[col]) != col_type_map.end(),
"Must specify data types for all active columns");
column_parse::flags col_flags_;
std::tie(dtype_, col_flags_) =
get_dtype_info(col_type_map[col_names[col]]);
h_column_flags[col] |= col_flags_;
CUDF_EXPECTS(dtypes.back().id() != cudf::type_id::EMPTY,
"Unsupported data type");
}
}
}
}
if (args_.timestamp_type.id() != cudf::type_id::EMPTY) {
for (auto &type : dtypes) {
if (cudf::is_timestamp(type)) {
type = args_.timestamp_type;
}
}
}
return dtypes;
}
void reader::impl::decode_data(const std::vector<data_type> &column_types,
std::vector<column_buffer> &out_buffers,
cudaStream_t stream) {
thrust::host_vector<void *> h_data(num_active_cols);
thrust::host_vector<bitmask_type *> h_valid(num_active_cols);
for (int i = 0; i < num_active_cols; ++i) {
h_data[i] = out_buffers[i].data();
h_valid[i] = out_buffers[i].null_mask();
}
rmm::device_vector<data_type> d_dtypes(column_types);
rmm::device_vector<void *> d_data = h_data;
rmm::device_vector<bitmask_type *> d_valid = h_valid;
d_column_flags = h_column_flags;
CUDA_TRY(cudf::io::csv::gpu::DecodeRowColumnData(
data_ptr, row_offsets.data().get(),
num_records, num_actual_cols, opts, d_column_flags.data().get(),
d_dtypes.data().get(), d_data.data().get(), d_valid.data().get(),
stream));
CUDA_TRY(cudaStreamSynchronize(stream));
for (int i = 0; i < num_active_cols; ++i) {
out_buffers[i].null_count() = UNKNOWN_NULL_COUNT;
}
}
reader::impl::impl(std::unique_ptr<datasource> source, std::string filepath,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: source_(std::move(source)), mr_(mr), filepath_(filepath), args_(options) {
num_actual_cols = args_.names.size();
num_active_cols = args_.names.size();
if (args_.delim_whitespace) {
opts.delimiter = ' ';
opts.multi_delimiter = true;
} else {
opts.delimiter = args_.delimiter;
opts.multi_delimiter = false;
}
opts.terminator = args_.lineterminator;
if (args_.quotechar != '\0' && args_.quoting != quote_style::NONE) {
opts.quotechar = args_.quotechar;
opts.keepquotes = false;
opts.doublequote = args_.doublequote;
} else {
opts.quotechar = '\0';
opts.keepquotes = true;
opts.doublequote = false;
}
opts.skipblanklines = args_.skip_blank_lines;
opts.comment = args_.comment;
opts.dayfirst = args_.dayfirst;
opts.decimal = args_.decimal;
opts.thousands = args_.thousands;
CUDF_EXPECTS(opts.decimal != opts.delimiter,
"Decimal point cannot be the same as the delimiter");
CUDF_EXPECTS(opts.thousands != opts.delimiter,
"Thousands separator cannot be the same as the delimiter");
compression_type_ = infer_compression_type(
args_.compression, filepath,
{{"gz", "gzip"}, {"zip", "zip"}, {"bz2", "bz2"}, {"xz", "xz"}});
// Handle user-defined true values, whereby field data is substituted with a
// boolean true or numeric `1` value
if (args_.true_values.size() != 0) {
d_trueTrie = createSerializedTrie(args_.true_values);
opts.trueValuesTrie = d_trueTrie.data().get();
}
// Handle user-defined false values, whereby field data is substituted with a
// boolean false or numeric `0` value
if (args_.false_values.size() != 0) {
d_falseTrie = createSerializedTrie(args_.false_values);
opts.falseValuesTrie = d_falseTrie.data().get();
}
// Handle user-defined N/A values, whereby field data is treated as null
if (args_.na_values.size() != 0) {
d_naTrie = createSerializedTrie(args_.na_values);
opts.naValuesTrie = d_naTrie.data().get();
}
}
// Forward to implementation
reader::reader(std::string filepath, reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(nullptr, filepath, options, mr)) {
// Delay actual instantiation of data source until read to allow for
// partial memory mapping of file using byte ranges
}
// Forward to implementation
reader::reader(const char *buffer, size_t length, reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(datasource::create(buffer, length), "",
options, mr)) {}
// Forward to implementation
reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(datasource::create(file), "", options, mr)) {
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read_all(cudaStream_t stream) {
return _impl->read(0, 0, 0, 0, -1, stream);
}
// Forward to implementation
table_with_metadata reader::read_byte_range(size_t offset, size_t size,
cudaStream_t stream) {
return _impl->read(offset, size, 0, 0, -1, stream);
}
// Forward to implementation
table_with_metadata reader::read_rows(size_type num_skip_header,
size_type num_skip_footer,
size_type num_rows,
cudaStream_t stream) {
CUDF_EXPECTS(num_rows == -1 || num_skip_footer == 0,
"Cannot use both `num_rows` and `num_skip_footer`");
return _impl->read(0, 0, num_skip_header, num_skip_footer, num_rows, stream);
}
} // namespace csv
} // namespace detail
} // namespace io
} // namespace experimental
} // namespace cudf
|
a40838dae2ae6e08c6528d14895baede2d7972fe.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <bits/stdc++.h>
#define BLOCKS 32768*2
#define THREADS 256
#define SIZE BLOCKS*THREADS
using namespace std;
__host__ void printArr(int *arr, int size);
__host__ void randomArrGenerator(int *arr, int size);
__host__ void checkSorted(int *arr, int size);
__device__ void swapCu(int &a, int &b)
{
int temp = a;
a = b;
b = temp;
}
//bitonic sort on GPU
__global__ void bitonicSortCu(int *arr, int i, int j, int size)
{
int k = threadIdx.x + blockIdx.x*blockDim.x;
if(k<size && k%(j<<1) < j)
{
bool descending = (k/i)%2;
if(descending && arr[k] < arr[k+j])
swapCu(arr[k], arr[k+j]);
else if(!descending && arr[k] > arr[k+j])
swapCu(arr[k], arr[k+j]);
}
}
void bitonicSortParallel(int *arr, int size)
{
for(int i=2; i<=size; i*=2)
for(int j=i/2; j>=1; j/=2)
hipLaunchKernelGGL(( bitonicSortCu), dim3(BLOCKS), dim3(THREADS), 0, 0, arr, i, j, size);
}
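// Illustrative schedule (assuming size is a power of two, as SIZE above is): for
// size = 8 the host loops launch the kernel with (i, j) =
// (2,1), (4,2), (4,1), (8,4), (8,2), (8,1),
// i.e. log2(size) * (log2(size) + 1) / 2 = 6 grid-wide passes, each comparing
// element k against element k + j.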
int main(int argc, char const *argv[])
{
int *d_arr;
int *arr = new int[SIZE];
randomArrGenerator(arr, SIZE);
hipMalloc(&d_arr, sizeof(int)*SIZE);
hipDeviceSynchronize();
//start timer here
hipMemcpyAsync(d_arr, arr, sizeof(int)*SIZE, hipMemcpyHostToDevice);
bitonicSortParallel(d_arr, SIZE);
hipMemcpyAsync(arr, d_arr, sizeof(int)*SIZE, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
//end timer here
checkSorted(arr, SIZE);
return 0;
}
// Auxiliary CPU functions
__host__ void checkSorted(int *arr, int size)
{
for(int i=1; i<size; i++)
if(arr[i] < arr[i-1])
{
cout << "sorting unsuccessful\n";
return;
}
cout << "sorting successful\n";
}
__host__ void randomArrGenerator(int *arr, int size)
{
for(int i=0; i<size; i++)
arr[i] = rand()%1000;
}
__host__ void printArr(int *arr, int size)
{
for(int i=0; i<size; i++)
cout << arr[i] << " ";
cout << endl;
}
__host__ void swap(int &a, int &b)
{
int temp = a;
a = b;
b = temp;
}
//bitonic sort on CPU
__host__ void bitonicSort(int *arr, int size)
{
if(size > 1)
{
for(int i=2; i<=size; i*=2)
{
for(int j=i/2; j>=1; j/=2)
{
for(int k=0; k<size; k++)
{
if(k%(j<<1) < j)
{
bool descending = (k/i)%2;
if(descending && arr[k] < arr[k+j])
swap(arr[k], arr[k+j]);
else if(!descending && arr[k] > arr[k+j])
swap(arr[k], arr[k+j]);
}
}
}
}
}
}
| a40838dae2ae6e08c6528d14895baede2d7972fe.cu | #include <cuda_runtime.h>
#include <bits/stdc++.h>
#define BLOCKS 32768*2
#define THREADS 256
#define SIZE BLOCKS*THREADS
using namespace std;
__host__ void printArr(int *arr, int size);
__host__ void randomArrGenerator(int *arr, int size);
__host__ void checkSorted(int *arr, int size);
__device__ void swapCu(int &a, int &b)
{
int temp = a;
a = b;
b = temp;
}
//bitonic sort on GPU
__global__ void bitonicSortCu(int *arr, int i, int j, int size)
{
int k = threadIdx.x + blockIdx.x*blockDim.x;
if(k<size && k%(j<<1) < j)
{
bool descending = (k/i)%2;
if(descending && arr[k] < arr[k+j])
swapCu(arr[k], arr[k+j]);
else if(!descending && arr[k] > arr[k+j])
swapCu(arr[k], arr[k+j]);
}
}
void bitonicSortParallel(int *arr, int size)
{
for(int i=2; i<=size; i*=2)
for(int j=i/2; j>=1; j/=2)
bitonicSortCu<<<BLOCKS, THREADS>>>(arr, i, j, size);
}
int main(int argc, char const *argv[])
{
int *d_arr;
int *arr = new int[SIZE];
randomArrGenerator(arr, SIZE);
cudaMalloc(&d_arr, sizeof(int)*SIZE);
cudaDeviceSynchronize();
//start timer here
cudaMemcpyAsync(d_arr, arr, sizeof(int)*SIZE, cudaMemcpyHostToDevice);
bitonicSortParallel(d_arr, SIZE);
cudaMemcpyAsync(arr, d_arr, sizeof(int)*SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
//end timer here
checkSorted(arr, SIZE);
return 0;
}
// Auxiliary CPU functions
__host__ void checkSorted(int *arr, int size)
{
for(int i=1; i<size; i++)
if(arr[i] < arr[i-1])
{
cout << "sorting unsuccessful\n";
return;
}
cout << "sorting successful\n";
}
__host__ void randomArrGenerator(int *arr, int size)
{
for(int i=0; i<size; i++)
arr[i] = rand()%1000;
}
__host__ void printArr(int *arr, int size)
{
for(int i=0; i<size; i++)
cout << arr[i] << " ";
cout << endl;
}
__host__ void swap(int &a, int &b)
{
int temp = a;
a = b;
b = temp;
}
//bitonic sort on CPU
__host__ void bitonicSort(int *arr, int size)
{
if(size > 1)
{
for(int i=2; i<=size; i*=2)
{
for(int j=i/2; j>=1; j/=2)
{
for(int k=0; k<size; k++)
{
if(k%(j<<1) < j)
{
bool descending = (k/i)%2;
if(descending && arr[k] < arr[k+j])
swap(arr[k], arr[k+j]);
else if(!descending && arr[k] > arr[k+j])
swap(arr[k], arr[k+j]);
}
}
}
}
}
}
|
c6edb1b39879eda09f7ae9be595ee27807fac37b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
#include <c10/util/Half.h>
#include "bias_act.h"
//------------------------------------------------------------------------
// Helpers.
template <class T> struct InternalType;
template <> struct InternalType<double> { typedef double scalar_t; };
template <> struct InternalType<float> { typedef float scalar_t; };
template <> struct InternalType<c10::Half> { typedef float scalar_t; };
//------------------------------------------------------------------------
// CUDA kernel.
template <class T, int A>
__global__ void bias_act_kernel(bias_act_kernel_params p)
{
typedef typename InternalType<T>::scalar_t scalar_t;
int G = p.grad;
scalar_t alpha = (scalar_t)p.alpha;
scalar_t gain = (scalar_t)p.gain;
scalar_t clamp = (scalar_t)p.clamp;
scalar_t one = (scalar_t)1;
scalar_t two = (scalar_t)2;
scalar_t expRange = (scalar_t)80;
scalar_t halfExpRange = (scalar_t)40;
scalar_t seluScale = (scalar_t)1.0507009873554804934193349852946;
scalar_t seluAlpha = (scalar_t)1.6732632423543772848170429916717;
// Loop over elements.
int xi = blockIdx.x * p.loopX * blockDim.x + threadIdx.x;
for (int loopIdx = 0; loopIdx < p.loopX && xi < p.sizeX; loopIdx++, xi += blockDim.x)
{
// Load.
scalar_t x = (scalar_t)((const T*)p.x)[xi];
scalar_t b = (p.b) ? (scalar_t)((const T*)p.b)[(xi / p.stepB) % p.sizeB] : 0;
scalar_t xref = (p.xref) ? (scalar_t)((const T*)p.xref)[xi] : 0;
scalar_t yref = (p.yref) ? (scalar_t)((const T*)p.yref)[xi] : 0;
scalar_t dy = (p.dy) ? (scalar_t)((const T*)p.dy)[xi] : one;
scalar_t yy = (gain != 0) ? yref / gain : 0;
scalar_t y = 0;
// Apply bias.
((G == 0) ? x : xref) += b;
// linear
if (A == 1)
{
if (G == 0) y = x;
if (G == 1) y = x;
}
// relu
if (A == 2)
{
if (G == 0) y = (x > 0) ? x : 0;
if (G == 1) y = (yy > 0) ? x : 0;
}
// lrelu
if (A == 3)
{
if (G == 0) y = (x > 0) ? x : x * alpha;
if (G == 1) y = (yy > 0) ? x : x * alpha;
}
// tanh
if (A == 4)
{
if (G == 0) { scalar_t c = exp(x); scalar_t d = one / c; y = (x < -expRange) ? -one : (x > expRange) ? one : (c - d) / (c + d); }
if (G == 1) y = x * (one - yy * yy);
if (G == 2) y = x * (one - yy * yy) * (-two * yy);
}
// sigmoid
if (A == 5)
{
if (G == 0) y = (x < -expRange) ? 0 : one / (exp(-x) + one);
if (G == 1) y = x * yy * (one - yy);
if (G == 2) y = x * yy * (one - yy) * (one - two * yy);
}
// elu
if (A == 6)
{
if (G == 0) y = (x >= 0) ? x : exp(x) - one;
if (G == 1) y = (yy >= 0) ? x : x * (yy + one);
if (G == 2) y = (yy >= 0) ? 0 : x * (yy + one);
}
// selu
if (A == 7)
{
if (G == 0) y = (x >= 0) ? seluScale * x : (seluScale * seluAlpha) * (exp(x) - one);
if (G == 1) y = (yy >= 0) ? x * seluScale : x * (yy + seluScale * seluAlpha);
if (G == 2) y = (yy >= 0) ? 0 : x * (yy + seluScale * seluAlpha);
}
// softplus
if (A == 8)
{
if (G == 0) y = (x > expRange) ? x : log(exp(x) + one);
if (G == 1) y = x * (one - exp(-yy));
if (G == 2) { scalar_t c = exp(-yy); y = x * c * (one - c); }
}
// swish
if (A == 9)
{
if (G == 0)
y = (x < -expRange) ? 0 : x / (exp(-x) + one);
else
{
scalar_t c = exp(xref);
scalar_t d = c + one;
if (G == 1)
y = (xref > halfExpRange) ? x : x * c * (xref + d) / (d * d);
else
y = (xref > halfExpRange) ? 0 : x * c * (xref * (two - d) + two * d) / (d * d * d);
yref = (xref < -expRange) ? 0 : xref / (exp(-xref) + one) * gain;
}
}
// Apply gain.
y *= gain * dy;
// Clamp.
if (clamp >= 0)
{
if (G == 0)
y = (y > -clamp & y < clamp) ? y : (y >= 0) ? clamp : -clamp;
else
y = (yref > -clamp & yref < clamp) ? y : 0;
}
// Store.
((T*)p.y)[xi] = (T)y;
}
}
//------------------------------------------------------------------------
// CUDA kernel selection.
template <class T> void* choose_bias_act_kernel(const bias_act_kernel_params& p)
{
if (p.act == 1) return (void*)bias_act_kernel<T, 1>;
if (p.act == 2) return (void*)bias_act_kernel<T, 2>;
if (p.act == 3) return (void*)bias_act_kernel<T, 3>;
if (p.act == 4) return (void*)bias_act_kernel<T, 4>;
if (p.act == 5) return (void*)bias_act_kernel<T, 5>;
if (p.act == 6) return (void*)bias_act_kernel<T, 6>;
if (p.act == 7) return (void*)bias_act_kernel<T, 7>;
if (p.act == 8) return (void*)bias_act_kernel<T, 8>;
if (p.act == 9) return (void*)bias_act_kernel<T, 9>;
return NULL;
}
//------------------------------------------------------------------------
// Template specializations.
template void* choose_bias_act_kernel<double> (const bias_act_kernel_params& p);
template void* choose_bias_act_kernel<float> (const bias_act_kernel_params& p);
template void* choose_bias_act_kernel<c10::Half> (const bias_act_kernel_params& p);
//------------------------------------------------------------------------
| c6edb1b39879eda09f7ae9be595ee27807fac37b.cu | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
#include <c10/util/Half.h>
#include "bias_act.h"
//------------------------------------------------------------------------
// Helpers.
template <class T> struct InternalType;
template <> struct InternalType<double> { typedef double scalar_t; };
template <> struct InternalType<float> { typedef float scalar_t; };
template <> struct InternalType<c10::Half> { typedef float scalar_t; };
//------------------------------------------------------------------------
// CUDA kernel.
template <class T, int A>
__global__ void bias_act_kernel(bias_act_kernel_params p)
{
typedef typename InternalType<T>::scalar_t scalar_t;
int G = p.grad;
scalar_t alpha = (scalar_t)p.alpha;
scalar_t gain = (scalar_t)p.gain;
scalar_t clamp = (scalar_t)p.clamp;
scalar_t one = (scalar_t)1;
scalar_t two = (scalar_t)2;
scalar_t expRange = (scalar_t)80;
scalar_t halfExpRange = (scalar_t)40;
scalar_t seluScale = (scalar_t)1.0507009873554804934193349852946;
scalar_t seluAlpha = (scalar_t)1.6732632423543772848170429916717;
// Loop over elements.
int xi = blockIdx.x * p.loopX * blockDim.x + threadIdx.x;
for (int loopIdx = 0; loopIdx < p.loopX && xi < p.sizeX; loopIdx++, xi += blockDim.x)
{
// Load.
scalar_t x = (scalar_t)((const T*)p.x)[xi];
scalar_t b = (p.b) ? (scalar_t)((const T*)p.b)[(xi / p.stepB) % p.sizeB] : 0;
scalar_t xref = (p.xref) ? (scalar_t)((const T*)p.xref)[xi] : 0;
scalar_t yref = (p.yref) ? (scalar_t)((const T*)p.yref)[xi] : 0;
scalar_t dy = (p.dy) ? (scalar_t)((const T*)p.dy)[xi] : one;
scalar_t yy = (gain != 0) ? yref / gain : 0;
scalar_t y = 0;
// Apply bias.
((G == 0) ? x : xref) += b;
// linear
if (A == 1)
{
if (G == 0) y = x;
if (G == 1) y = x;
}
// relu
if (A == 2)
{
if (G == 0) y = (x > 0) ? x : 0;
if (G == 1) y = (yy > 0) ? x : 0;
}
// lrelu
if (A == 3)
{
if (G == 0) y = (x > 0) ? x : x * alpha;
if (G == 1) y = (yy > 0) ? x : x * alpha;
}
// tanh
if (A == 4)
{
if (G == 0) { scalar_t c = exp(x); scalar_t d = one / c; y = (x < -expRange) ? -one : (x > expRange) ? one : (c - d) / (c + d); }
if (G == 1) y = x * (one - yy * yy);
if (G == 2) y = x * (one - yy * yy) * (-two * yy);
}
// sigmoid
if (A == 5)
{
if (G == 0) y = (x < -expRange) ? 0 : one / (exp(-x) + one);
if (G == 1) y = x * yy * (one - yy);
if (G == 2) y = x * yy * (one - yy) * (one - two * yy);
}
// elu
if (A == 6)
{
if (G == 0) y = (x >= 0) ? x : exp(x) - one;
if (G == 1) y = (yy >= 0) ? x : x * (yy + one);
if (G == 2) y = (yy >= 0) ? 0 : x * (yy + one);
}
// selu
if (A == 7)
{
if (G == 0) y = (x >= 0) ? seluScale * x : (seluScale * seluAlpha) * (exp(x) - one);
if (G == 1) y = (yy >= 0) ? x * seluScale : x * (yy + seluScale * seluAlpha);
if (G == 2) y = (yy >= 0) ? 0 : x * (yy + seluScale * seluAlpha);
}
// softplus
if (A == 8)
{
if (G == 0) y = (x > expRange) ? x : log(exp(x) + one);
if (G == 1) y = x * (one - exp(-yy));
if (G == 2) { scalar_t c = exp(-yy); y = x * c * (one - c); }
}
// swish
if (A == 9)
{
if (G == 0)
y = (x < -expRange) ? 0 : x / (exp(-x) + one);
else
{
scalar_t c = exp(xref);
scalar_t d = c + one;
if (G == 1)
y = (xref > halfExpRange) ? x : x * c * (xref + d) / (d * d);
else
y = (xref > halfExpRange) ? 0 : x * c * (xref * (two - d) + two * d) / (d * d * d);
yref = (xref < -expRange) ? 0 : xref / (exp(-xref) + one) * gain;
}
}
// Apply gain.
y *= gain * dy;
// Clamp.
if (clamp >= 0)
{
if (G == 0)
y = (y > -clamp & y < clamp) ? y : (y >= 0) ? clamp : -clamp;
else
y = (yref > -clamp & yref < clamp) ? y : 0;
}
// Store.
((T*)p.y)[xi] = (T)y;
}
}
//------------------------------------------------------------------------
// CUDA kernel selection.
template <class T> void* choose_bias_act_kernel(const bias_act_kernel_params& p)
{
if (p.act == 1) return (void*)bias_act_kernel<T, 1>;
if (p.act == 2) return (void*)bias_act_kernel<T, 2>;
if (p.act == 3) return (void*)bias_act_kernel<T, 3>;
if (p.act == 4) return (void*)bias_act_kernel<T, 4>;
if (p.act == 5) return (void*)bias_act_kernel<T, 5>;
if (p.act == 6) return (void*)bias_act_kernel<T, 6>;
if (p.act == 7) return (void*)bias_act_kernel<T, 7>;
if (p.act == 8) return (void*)bias_act_kernel<T, 8>;
if (p.act == 9) return (void*)bias_act_kernel<T, 9>;
return NULL;
}
//------------------------------------------------------------------------
// Template specializations.
template void* choose_bias_act_kernel<double> (const bias_act_kernel_params& p);
template void* choose_bias_act_kernel<float> (const bias_act_kernel_params& p);
template void* choose_bias_act_kernel<c10::Half> (const bias_act_kernel_params& p);
//------------------------------------------------------------------------
|
2b4c42887dbcbec372b4f3051dc91200cbc96845.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstring>
#include <vector>
#include "hip/hip_runtime.h"
#include "hip/hip_fp16.h"
#include "NvInfer.h"
#include "NvInferPlugin.h"
#include "plugin_utils.h"
#include "PReLUPlugin.h"
#include "spdlog/spdlog.h"
static const char* G_PRELU_TYPE = "PReLU";
static const char* G_PRELU_NAME = "PReLU_TRT"; //plugin_name = plugin_type + plugin_namespace
// CUDA: use 512 threads per block
static const int CUDA_NUM_THREADS = 512;
// CUDA: number of blocks for threads.
inline int CAFFE_GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
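// Example (illustrative): for n = 1 << 20 the launcher below uses
// CAFFE_GET_BLOCKS(n) = 2048 blocks of 512 threads, so the grid covers n exactly
// and each thread runs the loop body once; the gridDim stride only kicks in when
// the grid is smaller than (n + 511) / 512 blocks.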
// /******** PReLU CUDA function ********/
// CUDA kernel for forward
template <typename Ftype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Ftype* slope_data,
const Ftype* in, Ftype* out,
const Ftype zero,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
if(in[index] > zero) {
out[index] = in[index];
} else {
out[index] = in[index] * slope_data[c];
}
}
}
template <typename Ftype>
hipError_t Forward_gpu(const int count, const int channels, const int dim,
const Ftype* mDeviceKernel,
const Ftype* bottom_data, Ftype* top_data,
const Ftype zero,
const int div_factor, const hipStream_t stream) {
hipLaunchKernelGGL(( PReLUForward), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream,
count, channels, dim, mDeviceKernel, bottom_data, top_data, zero, div_factor);
hipError_t err = hipGetLastError();
return err;
}
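// Example launch math (illustrative): for a 1x64x56x56 activation, count = 200704,
// so Forward_gpu() enqueues CAFFE_GET_BLOCKS(200704) = 392 blocks of 512 threads on
// `stream`, one thread per element of the NCHW tensor.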
PReLUPlugin::PReLUPlugin(const nvinfer1::Weights *weights, int nbWeights) {
mWeights = weights[0];
mWeights.values = malloc(mWeights.count * type2size(mWeights.type));
memcpy(const_cast<void *>(mWeights.values), weights[0].values, mWeights.count * type2size(mWeights.type));
}
// create the plugin at runtime from a byte stream
PReLUPlugin::PReLUPlugin(const void *data, size_t length) {
const char *d = static_cast<const char *>(data), *a = d;
read<int>(d, mNbInputChannels);
read<int>(d, mNbInputHeight);
read<int>(d, mNbInputWidth);
read<nvinfer1::DataType>(d, mDataType);
read<int64_t>(d, mWeights.count);
read<nvinfer1::DataType>(d, mWeights.type);
mWeights.values = nullptr;
mWeights.values = malloc(mWeights.count * type2size(mWeights.type));
memcpy(const_cast<void *>(mWeights.values), d, mWeights.count * type2size(mWeights.type));
d = d + mWeights.count * type2size(mWeights.type);
ASSERT(d == a + length);
}
size_t PReLUPlugin::getSerializationSize() const {
return sizeof(mNbInputChannels) + sizeof(mNbInputWidth) + sizeof(mNbInputHeight) + sizeof(mDataType) +
sizeof(mWeights.count) + sizeof(mWeights.type) + mWeights.count * type2size(mWeights.type);
}
void PReLUPlugin::serialize(void *buffer) const {
char *d = static_cast<char *>(buffer), *a = d;
write(d, mNbInputChannels);
write(d, mNbInputHeight);
write(d, mNbInputWidth);
write(d, mDataType);
write(d, mWeights.count);
write(d, mWeights.type);
convertAndCopyToBuffer(d, mWeights, mWeights.type);
ASSERT(d == a + getSerializationSize());
}
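// Serialized layout written above and read back by the deserializing constructor
// (assuming write<T>() emits sizeof(T) bytes, as getSerializationSize() implies):
//   [ mNbInputChannels | mNbInputHeight | mNbInputWidth | mDataType |
//     mWeights.count (int64_t) | mWeights.type | raw weight values ... ]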
PReLUPlugin::~PReLUPlugin() {
if (mWeights.values)
{
free(const_cast<void *>(mWeights.values));
mWeights.values = nullptr;
}
if (mDeviceKernel)
{
hipFree(mDeviceKernel);
mDeviceKernel = nullptr;
}
}
int PReLUPlugin::getNbOutputs() const {
return 1;
}
nvinfer1::Dims PReLUPlugin::getOutputDimensions(int index, const nvinfer1::Dims* inputs, int nbInputDims) {
if(index == 0) {
return nvinfer1::Dims3(inputs[0].d[0],inputs[0].d[1],inputs[0].d[2]);
} // else if(index == n) {
// for other outputs if exists.
// }
else {
ASSERT(false);
}
}
bool PReLUPlugin::supportsFormat(nvinfer1::DataType type, nvinfer1::PluginFormat format) const {
return (type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF)
&& format == nvinfer1::PluginFormat::kNCHW;
}
void PReLUPlugin::configureWithFormat(const nvinfer1::Dims* inputDims, int nbInputs,
const nvinfer1::Dims* outputDims, int nbOutputs,
nvinfer1::DataType type, nvinfer1::PluginFormat format,
int maxBatchSize) {
ASSERT((type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF)
&& format == nvinfer1::PluginFormat::kNCHW);
mNbInputChannels = inputDims[0].d[0];
mNbInputHeight = inputDims[0].d[1];
mNbInputWidth = inputDims[0].d[2];
mDataType = type;
}
int PReLUPlugin::initialize() {
convertAndCopyToDeivce(mDeviceKernel, mWeights, mDataType);
return 0;
}
void PReLUPlugin::terminate() {
if (mWeights.values)
{
free(const_cast<void *>(mWeights.values));
mWeights.values = nullptr;
}
if (mDeviceKernel)
{
hipFree(mDeviceKernel);
mDeviceKernel = nullptr;
}
}
size_t PReLUPlugin::getWorkspaceSize(int maxBatchSize) const
{
return 0;
}
int PReLUPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, hipStream_t stream)
{
const int count = batchSize * mNbInputChannels * mNbInputWidth * mNbInputHeight;
const int channels = mNbInputChannels;
const int dim = mNbInputWidth * mNbInputHeight;
const int div_factor = 1;
if (mDataType == nvinfer1::DataType::kFLOAT)
{
const float zerof{0.0f};
CUDA_CHECK(Forward_gpu(count, channels, dim,
reinterpret_cast<const float *>(mDeviceKernel),
reinterpret_cast<const float *>(inputs[0]),
reinterpret_cast<float *>(outputs[0]),
zerof,
div_factor,
stream));
} else {
const __half zeroh = __half(0.0f);
CUDA_CHECK(Forward_gpu(count, channels, dim,
reinterpret_cast<const __half *>(mDeviceKernel),
reinterpret_cast<const __half *>(inputs[0]),
reinterpret_cast<__half *>(outputs[0]),
zeroh,
div_factor,
stream));
}
return 0;
}
const char *PReLUPlugin::getPluginType() const {
return G_PRELU_TYPE;
}
const char *PReLUPlugin::getPluginVersion() const {
return G_PLUGIN_VERSION;
}
void PReLUPlugin::destroy() {
delete this;
}
nvinfer1::IPluginV2* PReLUPlugin::clone() const {
return new PReLUPlugin(&mWeights, 1);
}
const char* PReLUPlugin::getPluginNamespace() const {
return G_PLUGIN_NAMESPACE;
}
PReLUPluginCreator::PReLUPluginCreator() {
mPluginAttributes.emplace_back(nvinfer1::PluginField("weights", nullptr, nvinfer1::PluginFieldType::kFLOAT32, 1));
mPluginAttributes.emplace_back(nvinfer1::PluginField("nbWeight", nullptr, nvinfer1::PluginFieldType::kINT32, 1));
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
// return PRELU_PLUGIN_TYPE + PRELU_PLUGIN_NAMESPACE
const char* PReLUPluginCreator::getPluginName() const {
// std::string plugin_type{G_PRELU_TYPE};
// std::string plugin_namespace{G_PLUGIN_NAMESPACE};
// return (plugin_type+plugin_namespace).c_str();
return G_PRELU_NAME;
}
const char* PReLUPluginCreator::getPluginVersion() const {
return G_PLUGIN_VERSION;
}
const nvinfer1::PluginFieldCollection* PReLUPluginCreator::getFieldNames() {
return &mFC;
}
nvinfer1::IPluginV2* PReLUPluginCreator::createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc) {
int nbWeights;
std::vector<float> weightValues;
const nvinfer1::PluginField* fields = fc->fields;
for (int i=0; i<fc->nbFields; i++) {
const char* attrName = fields[i].name;
if(strcmp(attrName, "nbWeights") == 0) { // strcmp() returns 0 when the names match
ASSERT(fields[i].type == nvinfer1::PluginFieldType::kINT32);
nbWeights = *(static_cast<const int*>(fields[i].data));
}
if(strcmp(attrName, "weights") == 0) {
ASSERT(fields[i].type == nvinfer1::PluginFieldType::kFLOAT32);
weightValues.reserve(fields[i].length);
const auto* w = static_cast<const float*>(fields[i].data);
for (int j = 0; j < fields[i].length; j++) // copy all fields[i].length weight values
{
weightValues.push_back(*w);
w++;
}
}
}
nvinfer1::Weights weights{nvinfer1::DataType::kFLOAT, weightValues.data(), (int64_t)weightValues.size()};
return new PReLUPlugin(&weights,nbWeights);
}
// deserialization plugin implementation
nvinfer1::IPluginV2* PReLUPluginCreator::deserializePlugin(const char *layerName, const void *serialData, size_t serialLength) {
return new PReLUPlugin(serialData, serialLength);
}
const char* PReLUPluginCreator::getPluginNamespace() const {
return G_PLUGIN_NAMESPACE;
}
REGISTER_TENSORRT_PLUGIN(PReLUPluginCreator); // DO NOT FORGET THIS
// Don't forget this
| 2b4c42887dbcbec372b4f3051dc91200cbc96845.cu | #include <cstring>
#include <vector>
#include "cuda_runtime.h"
#include "cuda_fp16.h"
#include "NvInfer.h"
#include "NvInferPlugin.h"
#include "plugin_utils.h"
#include "PReLUPlugin.h"
#include "spdlog/spdlog.h"
static const char* G_PRELU_TYPE = "PReLU";
static const char* G_PRELU_NAME = "PReLU_TRT"; //plugin_name = plugin_type + plugin_namespace
// CUDA: use 512 threads per block
static const int CUDA_NUM_THREADS = 512;
// CUDA: number of blocks for threads.
inline int CAFFE_GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// /******** PReLU CUDA function ********/
// CUDA kernel for forward
template <typename Ftype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Ftype* slope_data,
const Ftype* in, Ftype* out,
const Ftype zero,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
if(in[index] > zero) {
out[index] = in[index];
} else {
out[index] = in[index] * slope_data[c];
}
}
}
template <typename Ftype>
cudaError_t Forward_gpu(const int count, const int channels, const int dim,
const Ftype* mDeviceKernel,
const Ftype* bottom_data, Ftype* top_data,
const Ftype zero,
const int div_factor, const cudaStream_t stream) {
PReLUForward<<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>
(count, channels, dim, mDeviceKernel, bottom_data, top_data, zero, div_factor);
cudaError_t err = cudaGetLastError();
return err;
}
PReLUPlugin::PReLUPlugin(const nvinfer1::Weights *weights, int nbWeights) {
mWeights = weights[0];
mWeights.values = malloc(mWeights.count * type2size(mWeights.type));
memcpy(const_cast<void *>(mWeights.values), weights[0].values, mWeights.count * type2size(mWeights.type));
}
// create the plugin at runtime from a byte stream
PReLUPlugin::PReLUPlugin(const void *data, size_t length) {
const char *d = static_cast<const char *>(data), *a = d;
read<int>(d, mNbInputChannels);
read<int>(d, mNbInputHeight);
read<int>(d, mNbInputWidth);
read<nvinfer1::DataType>(d, mDataType);
read<int64_t>(d, mWeights.count);
read<nvinfer1::DataType>(d, mWeights.type);
mWeights.values = nullptr;
mWeights.values = malloc(mWeights.count * type2size(mWeights.type));
memcpy(const_cast<void *>(mWeights.values), d, mWeights.count * type2size(mWeights.type));
d = d + mWeights.count * type2size(mWeights.type);
ASSERT(d == a + length);
}
size_t PReLUPlugin::getSerializationSize() const {
return sizeof(mNbInputChannels) + sizeof(mNbInputWidth) + sizeof(mNbInputHeight) + sizeof(mDataType) +
sizeof(mWeights.count) + sizeof(mWeights.type) + mWeights.count * type2size(mWeights.type);
}
void PReLUPlugin::serialize(void *buffer) const {
char *d = static_cast<char *>(buffer), *a = d;
write(d, mNbInputChannels);
write(d, mNbInputHeight);
write(d, mNbInputWidth);
write(d, mDataType);
write(d, mWeights.count);
write(d, mWeights.type);
convertAndCopyToBuffer(d, mWeights, mWeights.type);
ASSERT(d == a + getSerializationSize());
}
PReLUPlugin::~PReLUPlugin() {
if (mWeights.values)
{
free(const_cast<void *>(mWeights.values));
mWeights.values = nullptr;
}
if (mDeviceKernel)
{
cudaFree(mDeviceKernel);
mDeviceKernel = nullptr;
}
}
int PReLUPlugin::getNbOutputs() const {
return 1;
}
nvinfer1::Dims PReLUPlugin::getOutputDimensions(int index, const nvinfer1::Dims* inputs, int nbInputDims) {
if(index == 0) {
return nvinfer1::Dims3(inputs[0].d[0],inputs[0].d[1],inputs[0].d[2]);
} // else if(index == n) {
// for other outputs, if any exist.
// }
else {
ASSERT(false);
}
}
bool PReLUPlugin::supportsFormat(nvinfer1::DataType type, nvinfer1::PluginFormat format) const {
return (type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF)
&& format == nvinfer1::PluginFormat::kNCHW;
}
void PReLUPlugin::configureWithFormat(const nvinfer1::Dims* inputDims, int nbInputs,
const nvinfer1::Dims* outputDims, int nbOutputs,
nvinfer1::DataType type, nvinfer1::PluginFormat format,
int maxBatchSize) {
ASSERT((type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF)
&& format == nvinfer1::PluginFormat::kNCHW);
mNbInputChannels = inputDims[0].d[0];
mNbInputHeight = inputDims[0].d[1];
mNbInputWidth = inputDims[0].d[2];
mDataType = type;
}
int PReLUPlugin::initialize() {
convertAndCopyToDeivce(mDeviceKernel, mWeights, mDataType);
return 0;
}
void PReLUPlugin::terminate() {
if (mWeights.values)
{
free(const_cast<void *>(mWeights.values));
mWeights.values = nullptr;
}
if (mDeviceKernel)
{
cudaFree(mDeviceKernel);
mDeviceKernel = nullptr;
}
}
size_t PReLUPlugin::getWorkspaceSize(int maxBatchSize) const
{
return 0;
}
int PReLUPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, cudaStream_t stream)
{
const int count = batchSize * mNbInputChannels * mNbInputWidth * mNbInputHeight;
const int channels = mNbInputChannels;
const int dim = mNbInputWidth * mNbInputHeight;
const int div_factor = 1;
if (mDataType == nvinfer1::DataType::kFLOAT)
{
const float zerof{0.0f};
CUDA_CHECK(Forward_gpu(count, channels, dim,
reinterpret_cast<const float *>(mDeviceKernel),
reinterpret_cast<const float *>(inputs[0]),
reinterpret_cast<float *>(outputs[0]),
zerof,
div_factor,
stream));
} else {
const __half zeroh = __half(0.0f);
CUDA_CHECK(Forward_gpu(count, channels, dim,
reinterpret_cast<const __half *>(mDeviceKernel),
reinterpret_cast<const __half *>(inputs[0]),
reinterpret_cast<__half *>(outputs[0]),
zeroh,
div_factor,
stream));
}
return 0;
}
const char *PReLUPlugin::getPluginType() const {
return G_PRELU_TYPE;
}
const char *PReLUPlugin::getPluginVersion() const {
return G_PLUGIN_VERSION;
}
void PReLUPlugin::destroy() {
delete this;
}
nvinfer1::IPluginV2* PReLUPlugin::clone() const {
return new PReLUPlugin(&mWeights, 1);
}
const char* PReLUPlugin::getPluginNamespace() const {
return G_PLUGIN_NAMESPACE;
}
PReLUPluginCreator::PReLUPluginCreator() {
mPluginAttributes.emplace_back(nvinfer1::PluginField("weights", nullptr, nvinfer1::PluginFieldType::kFLOAT32, 1));
mPluginAttributes.emplace_back(nvinfer1::PluginField("nbWeight", nullptr, nvinfer1::PluginFieldType::kINT32, 1));
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
// return PRELU_PLUGIN_TYPE + PRELU_PLUGIN_NAMESPACE
const char* PReLUPluginCreator::getPluginName() const {
// std::string plugin_type{G_PRELU_TYPE};
// std::string plugin_namespace{G_PLUGIN_NAMESPACE};
// return (plugin_type+plugin_namespace).c_str();
return G_PRELU_NAME;
}
const char* PReLUPluginCreator::getPluginVersion() const {
return G_PLUGIN_VERSION;
}
const nvinfer1::PluginFieldCollection* PReLUPluginCreator::getFieldNames() {
return &mFC;
}
nvinfer1::IPluginV2* PReLUPluginCreator::createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc) {
int nbWeights;
std::vector<float> weightValues;
const nvinfer1::PluginField* fields = fc->fields;
for (int i=0; i<fc->nbFields; i++) {
const char* attrName = fields[i].name;
if(strcmp(attrName, "nbWeights") == 0) { // strcmp() returns 0 when the names match
ASSERT(fields[i].type == nvinfer1::PluginFieldType::kINT32);
nbWeights = *(static_cast<const int*>(fields[i].data));
}
if(strcmp(attrName, "weights") == 0) {
ASSERT(fields[i].type == nvinfer1::PluginFieldType::kFLOAT32);
weightValues.reserve(fields[i].length);
const auto* w = static_cast<const float*>(fields[i].data);
for (int j = 0; j < fields[i].length; j++) // copy all fields[i].length weight values
{
weightValues.push_back(*w);
w++;
}
}
}
nvinfer1::Weights weights{nvinfer1::DataType::kFLOAT, weightValues.data(), (int64_t)weightValues.size()};
return new PReLUPlugin(&weights,nbWeights);
}
// deserialization plugin implementation
nvinfer1::IPluginV2* PReLUPluginCreator::deserializePlugin(const char *layerName, const void *serialData, size_t serialLength) {
return new PReLUPlugin(serialData, serialLength);
}
const char* PReLUPluginCreator::getPluginNamespace() const {
return G_PLUGIN_NAMESPACE;
}
REGISTER_TENSORRT_PLUGIN(PReLUPluginCreator); // DO NOT FORGET THIS
// Don't forget this
|
cc937ed7ac8320c77fe5b3eefd19d0dca0ed1629.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/cudev/ptr2d/glob.hpp>
#include <opencv2/cudev/ptr2d/gpumat.hpp>
#include "stereo_matcher.hpp"
namespace {
const int kPairsPerBlock = 64;
const int kThreadsPerPair = 4;
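// Scores one candidate descriptor pair per group of kThreadsPerPair threads:
// each thread XORs one uint4 chunk of the two descriptor rows (4 threads x 16
// bytes = 64 bytes per descriptor) and popcounts it, so the pair's score is the
// Hamming distance between the descriptors. The per-thread partial sums are
// combined with a small warp-synchronous reduction in shared memory and
// thread 0 of the group writes the final score.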
__global__ void computeScoresGpu(
const cv::cudev::GlobPtr<uchar> d1,
const cv::cudev::GlobPtr<uchar> d2,
CudaDeviceVector<ushort2>::Dev pairs,
ushort* global_scores) {
__shared__ ushort all_scores[kPairsPerBlock+1][kThreadsPerPair];
volatile ushort* scores = all_scores[threadIdx.y];
uint pair_id = kPairsPerBlock * blockIdx.x + threadIdx.y;
if (pair_id < pairs.size()) {
uint tid = threadIdx.x;
ushort2 p = pairs[pair_id];
const uint4* r1 = reinterpret_cast<const uint4*>(d1.row(p.x));
const uint4* r2 = reinterpret_cast<const uint4*>(d2.row(p.y));
uint4 b1 = *(r1 + tid);
uint4 b2 = *(r2 + tid);
scores[tid] =
__popc(b1.x ^ b2.x) +
__popc(b1.y ^ b2.y) +
__popc(b1.z ^ b2.z) +
__popc(b1.w ^ b2.w);
scores[tid] += scores[tid + 2];
scores[tid] += scores[tid + 1];
if (tid == 0) global_scores[pair_id] = scores[0];
}
}
}
__host__ Matcher::Matcher(int max_descriptors, int max_pairs) :
scores_gpu_(1, max_pairs),
m1_(max_descriptors),
m2_(max_descriptors) {
/* scores_cpu_.allocator = cv::cuda::HostMem::getAllocator( */
/* cv::cuda::HostMem::PAGE_LOCKED); */
scores_cpu_.create(1, max_pairs);
}
__host__ void Matcher::computeScores(
const cv::cudev::GpuMat_<uint8_t>& d1,
const cv::cudev::GpuMat_<uint8_t>& d2,
const CudaDeviceVector<ushort2>& pairs_gpu,
int n_pairs,
cv::cuda::Stream& stream) {
int n_blocks = (n_pairs + kPairsPerBlock - 1) / kPairsPerBlock;
dim3 block_dim(kThreadsPerPair, kPairsPerBlock);
auto cuda_stream = cv::cuda::StreamAccessor::getStream(stream);
hipLaunchKernelGGL(( computeScoresGpu), dim3(n_blocks), dim3(block_dim), 0, cuda_stream,
d1, d2,
pairs_gpu,
scores_gpu_.ptr<ushort>());
scores_gpu_.colRange(0, n_pairs).download(
scores_cpu_.colRange(0, n_pairs), stream);
cudaSafeCall(hipGetLastError());
}
__host__ void Matcher::gatherMatches(
int n1, int n2,
PinnedVector<ushort2>& pairs_cpu,
float threshold_ratio,
std::vector<ushort2>& matches) {
for (auto& m : m1_) {
m.best = m.second = m.match = 0xFFFF;
}
for (auto& m : m2_) {
m.best = m.second = m.match = 0xFFFF;
}
/* std::cout << "Scores CPU: " << scores_cpu_.colRange(0, pairs_cpu.size()) << std::endl; */
auto updateMatch = [](Match& m, uint16_t s, uint16_t j) {
if (s < m.best) {
m = { s, m.best, j };
} else if (s < m.second) {
m.second = s;
}
};
for (int i = 0; i < pairs_cpu.size(); ++i) {
const auto& p = pairs_cpu[i];
uint16_t s = scores_cpu_(0, i);
updateMatch(m1_[p.x], s, p.y);
updateMatch(m2_[p.y], s, p.x);
}
/* std::cout << "m1 = "; */
/* for (int i=0; i < n1; ++i) { */
/* std::cout << "[" << m1_[i].best << ", " */
/* << m1_[i].second << ", " << m1_[i].match << "] "; */
/* } */
/* std::cout << std::endl; */
matches.resize(0);
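// Keep a candidate i <-> m1_[i].match only if it passes the ratio test in both
// directions (the best distance must be below 0.8x the second-best; note the
// hard-coded 0.8 rather than the threshold_ratio parameter) and the two
// features are each other's best match (cross-check).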
for (int i = 0; i < n1; ++i) {
const auto& m1 = m1_[i];
if (m1.best != 0xFFFF && m1.second * 0.8 > m1.best) {
const auto& m2 = m2_[m1.match];
if (m2.second * 0.8 > m2.best && m2.match == i) {
matches.push_back(make_ushort2(i, m1.match));
}
}
}
}
| cc937ed7ac8320c77fe5b3eefd19d0dca0ed1629.cu | #include <cuda_runtime.h>
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/cudev/ptr2d/glob.hpp>
#include <opencv2/cudev/ptr2d/gpumat.hpp>
#include "stereo_matcher.hpp"
namespace {
const int kPairsPerBlock = 64;
const int kThreadsPerPair = 4;
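// Scores one candidate descriptor pair per group of kThreadsPerPair threads:
// each thread XORs one uint4 chunk of the two descriptor rows (4 threads x 16
// bytes = 64 bytes per descriptor) and popcounts it, so the pair's score is the
// Hamming distance between the descriptors. The per-thread partial sums are
// combined with a small warp-synchronous reduction in shared memory and
// thread 0 of the group writes the final score.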
__global__ void computeScoresGpu(
const cv::cudev::GlobPtr<uchar> d1,
const cv::cudev::GlobPtr<uchar> d2,
CudaDeviceVector<ushort2>::Dev pairs,
ushort* global_scores) {
__shared__ ushort all_scores[kPairsPerBlock+1][kThreadsPerPair];
volatile ushort* scores = all_scores[threadIdx.y];
uint pair_id = kPairsPerBlock * blockIdx.x + threadIdx.y;
if (pair_id < pairs.size()) {
uint tid = threadIdx.x;
ushort2 p = pairs[pair_id];
const uint4* r1 = reinterpret_cast<const uint4*>(d1.row(p.x));
const uint4* r2 = reinterpret_cast<const uint4*>(d2.row(p.y));
uint4 b1 = *(r1 + tid);
uint4 b2 = *(r2 + tid);
scores[tid] =
__popc(b1.x ^ b2.x) +
__popc(b1.y ^ b2.y) +
__popc(b1.z ^ b2.z) +
__popc(b1.w ^ b2.w);
scores[tid] += scores[tid + 2];
scores[tid] += scores[tid + 1];
if (tid == 0) global_scores[pair_id] = scores[0];
}
}
}
__host__ Matcher::Matcher(int max_descriptors, int max_pairs) :
scores_gpu_(1, max_pairs),
m1_(max_descriptors),
m2_(max_descriptors) {
/* scores_cpu_.allocator = cv::cuda::HostMem::getAllocator( */
/* cv::cuda::HostMem::PAGE_LOCKED); */
scores_cpu_.create(1, max_pairs);
}
__host__ void Matcher::computeScores(
const cv::cudev::GpuMat_<uint8_t>& d1,
const cv::cudev::GpuMat_<uint8_t>& d2,
const CudaDeviceVector<ushort2>& pairs_gpu,
int n_pairs,
cv::cuda::Stream& stream) {
int n_blocks = (n_pairs + kPairsPerBlock - 1) / kPairsPerBlock;
dim3 block_dim(kThreadsPerPair, kPairsPerBlock);
auto cuda_stream = cv::cuda::StreamAccessor::getStream(stream);
computeScoresGpu<<<n_blocks, block_dim, 0, cuda_stream>>>(
d1, d2,
pairs_gpu,
scores_gpu_.ptr<ushort>());
scores_gpu_.colRange(0, n_pairs).download(
scores_cpu_.colRange(0, n_pairs), stream);
cudaSafeCall(cudaGetLastError());
}
__host__ void Matcher::gatherMatches(
int n1, int n2,
PinnedVector<ushort2>& pairs_cpu,
float threshold_ratio,
std::vector<ushort2>& matches) {
for (auto& m : m1_) {
m.best = m.second = m.match = 0xFFFF;
}
for (auto& m : m2_) {
m.best = m.second = m.match = 0xFFFF;
}
/* std::cout << "Scores CPU: " << scores_cpu_.colRange(0, pairs_cpu.size()) << std::endl; */
auto updateMatch = [](Match& m, uint16_t s, uint16_t j) {
if (s < m.best) {
m = { s, m.best, j };
} else if (s < m.second) {
m.second = s;
}
};
for (int i = 0; i < pairs_cpu.size(); ++i) {
const auto& p = pairs_cpu[i];
uint16_t s = scores_cpu_(0, i);
updateMatch(m1_[p.x], s, p.y);
updateMatch(m2_[p.y], s, p.x);
}
/* std::cout << "m1 = "; */
/* for (int i=0; i < n1; ++i) { */
/* std::cout << "[" << m1_[i].best << ", " */
/* << m1_[i].second << ", " << m1_[i].match << "] "; */
/* } */
/* std::cout << std::endl; */
matches.resize(0);
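// Keep a candidate i <-> m1_[i].match only if it passes the ratio test in both
// directions (the best distance must be below 0.8x the second-best; note the
// hard-coded 0.8 rather than the threshold_ratio parameter) and the two
// features are each other's best match (cross-check).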
for (int i = 0; i < n1; ++i) {
const auto& m1 = m1_[i];
if (m1.best != 0xFFFF && m1.second * 0.8 > m1.best) {
const auto& m2 = m2_[m1.match];
if (m2.second * 0.8 > m2.best && m2.match == i) {
matches.push_back(make_ushort2(i, m1.match));
}
}
}
}
|
74d2421e38f28f4321d23c2db6c2dbdad20d8af3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Paulius Micikevicius ([email protected])
* Max Grossman ([email protected])
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <unistd.h>
#include "common.h"
#include "common2d.h"
#define BDIMX 32
#define BDIMY 16
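// One thread per interior grid point: `div` accumulates a central-difference
// Laplacian in x and y using the stencil coefficients in c_coeff, and the
// wavefield is advanced with a leapfrog update, next = 2*curr - prev + vsq*div.
// `next` holds the previous time step on entry and is overwritten in place;
// the host code swaps the curr/next pointers after every step.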
__global__ void fwd_kernel(TYPE *next, TYPE *curr, TYPE *vsq, TYPE *c_coeff,
int nx, int ny, int dimx, int radius) {
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int this_offset = POINT_OFFSET(x, y, dimx, radius);
TYPE div = c_coeff[0] * curr[this_offset];
for (int d = 1; d <= radius; d++) {
const int y_pos_offset = POINT_OFFSET(x, y + d, dimx, radius);
const int y_neg_offset = POINT_OFFSET(x, y - d, dimx, radius);
const int x_pos_offset = POINT_OFFSET(x + d, y, dimx, radius);
const int x_neg_offset = POINT_OFFSET(x - d, y, dimx, radius);
div += c_coeff[d] * (curr[y_pos_offset] +
curr[y_neg_offset] + curr[x_pos_offset] +
curr[x_neg_offset]);
}
const TYPE temp = 2.0f * curr[this_offset] - next[this_offset];
next[this_offset] = temp + div * vsq[this_offset];
}
int main( int argc, char *argv[] ) {
config conf;
setup_config(&conf, argc, argv);
init_progress(conf.progress_width, conf.nsteps, conf.progress_disabled);
if (conf.nx % BDIMX != 0) {
fprintf(stderr, "Invalid nx configuration, must be an even multiple of "
"%d\n", BDIMX);
return 1;
}
if (conf.ny % BDIMY != 0) {
fprintf(stderr, "Invalid ny configuration, must be an even multiple of "
"%d\n", BDIMY);
return 1;
}
TYPE dx = 20.f;
TYPE dt = 0.002f;
// compute the pitch for perfect coalescing
size_t dimx = conf.nx + 2*conf.radius;
size_t dimy = conf.ny + 2*conf.radius;
size_t nbytes = dimx * dimy * sizeof(TYPE);
if (conf.verbose) {
printf("x = %zu, y = %zu\n", dimx, dimy);
printf("nsteps = %d\n", conf.nsteps);
printf("radius = %d\n", conf.radius);
}
TYPE c_coeff[NUM_COEFF];
TYPE *curr = (TYPE *)malloc(nbytes);
TYPE *next = (TYPE *)malloc(nbytes);
TYPE *vsq = (TYPE *)malloc(nbytes);
if (curr == NULL || next == NULL || vsq == NULL) {
fprintf(stderr, "Allocations failed\n");
return 1;
}
config_sources(&conf.srcs, &conf.nsrcs, conf.nx, conf.ny, conf.nsteps);
TYPE **srcs = sample_sources(conf.srcs, conf.nsrcs, conf.nsteps, dt);
init_data(curr, next, vsq, c_coeff, dimx, dimy, dimx * sizeof(TYPE), dx, dt);
TYPE *d_curr, *d_next, *d_vsq, *d_c_coeff;
CHECK(hipMalloc((void **)&d_curr, nbytes));
CHECK(hipMalloc((void **)&d_next, nbytes));
CHECK(hipMalloc((void **)&d_vsq, nbytes));
CHECK(hipMalloc((void **)&d_c_coeff, NUM_COEFF * sizeof(TYPE)));
dim3 block(BDIMX, BDIMY);
dim3 grid(conf.nx / block.x, conf.ny / block.y);
double mem_start = seconds();
CHECK(hipMemcpy(d_curr, curr, nbytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_next, next, nbytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_vsq, vsq, nbytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_c_coeff, c_coeff, NUM_COEFF * sizeof(TYPE),
hipMemcpyHostToDevice));
double start = seconds();
for (int step = 0; step < conf.nsteps; step++) {
for (int src = 0; src < conf.nsrcs; src++) {
if (conf.srcs[src].t > step) continue;
int src_offset = POINT_OFFSET(conf.srcs[src].x, conf.srcs[src].y,
dimx, conf.radius);
CHECK(hipMemcpy(d_curr + src_offset, srcs[src] + step,
sizeof(TYPE), hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( fwd_kernel), dim3(grid), dim3(block), 0, 0, d_next, d_curr, d_vsq, d_c_coeff,
conf.nx, conf.ny, dimx, conf.radius);
TYPE *tmp = d_next;
d_next = d_curr;
d_curr = tmp;
update_progress(step + 1);
}
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
double compute_s = seconds() - start;
CHECK(hipMemcpy(curr, d_curr, nbytes, hipMemcpyDeviceToHost));
double total_s = seconds() - mem_start;
float point_rate = (float)conf.nx * conf.ny / (compute_s / conf.nsteps);
printf("iso_r4_2x: %8.10f s total, %8.10f s/step, %8.2f Mcells/s/step\n",
total_s, compute_s / conf.nsteps, point_rate / 1000000.f);
if (conf.save_text) {
save_text(curr, dimx, dimy, conf.ny, conf.nx, "snap.text", conf.radius);
}
free(curr);
free(next);
free(vsq);
for (int i = 0; i < conf.nsrcs; i++) {
free(srcs[i]);
}
free(srcs);
CHECK(hipFree(d_curr));
CHECK(hipFree(d_next));
CHECK(hipFree(d_vsq));
CHECK(hipFree(d_c_coeff));
return 0;
}
| 74d2421e38f28f4321d23c2db6c2dbdad20d8af3.cu | /*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Paulius Micikevicius ([email protected])
* Max Grossman ([email protected])
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <unistd.h>
#include "common.h"
#include "common2d.h"
#define BDIMX 32
#define BDIMY 16
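// One thread per interior grid point: `div` accumulates a central-difference
// Laplacian in x and y using the stencil coefficients in c_coeff, and the
// wavefield is advanced with a leapfrog update, next = 2*curr - prev + vsq*div.
// `next` holds the previous time step on entry and is overwritten in place;
// the host code swaps the curr/next pointers after every step.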
__global__ void fwd_kernel(TYPE *next, TYPE *curr, TYPE *vsq, TYPE *c_coeff,
int nx, int ny, int dimx, int radius) {
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int this_offset = POINT_OFFSET(x, y, dimx, radius);
TYPE div = c_coeff[0] * curr[this_offset];
for (int d = 1; d <= radius; d++) {
const int y_pos_offset = POINT_OFFSET(x, y + d, dimx, radius);
const int y_neg_offset = POINT_OFFSET(x, y - d, dimx, radius);
const int x_pos_offset = POINT_OFFSET(x + d, y, dimx, radius);
const int x_neg_offset = POINT_OFFSET(x - d, y, dimx, radius);
div += c_coeff[d] * (curr[y_pos_offset] +
curr[y_neg_offset] + curr[x_pos_offset] +
curr[x_neg_offset]);
}
const TYPE temp = 2.0f * curr[this_offset] - next[this_offset];
next[this_offset] = temp + div * vsq[this_offset];
}
int main( int argc, char *argv[] ) {
config conf;
setup_config(&conf, argc, argv);
init_progress(conf.progress_width, conf.nsteps, conf.progress_disabled);
if (conf.nx % BDIMX != 0) {
fprintf(stderr, "Invalid nx configuration, must be an even multiple of "
"%d\n", BDIMX);
return 1;
}
if (conf.ny % BDIMY != 0) {
fprintf(stderr, "Invalid ny configuration, must be an even multiple of "
"%d\n", BDIMY);
return 1;
}
TYPE dx = 20.f;
TYPE dt = 0.002f;
// compute the pitch for perfect coalescing
size_t dimx = conf.nx + 2*conf.radius;
size_t dimy = conf.ny + 2*conf.radius;
size_t nbytes = dimx * dimy * sizeof(TYPE);
if (conf.verbose) {
printf("x = %zu, y = %zu\n", dimx, dimy);
printf("nsteps = %d\n", conf.nsteps);
printf("radius = %d\n", conf.radius);
}
TYPE c_coeff[NUM_COEFF];
TYPE *curr = (TYPE *)malloc(nbytes);
TYPE *next = (TYPE *)malloc(nbytes);
TYPE *vsq = (TYPE *)malloc(nbytes);
if (curr == NULL || next == NULL || vsq == NULL) {
fprintf(stderr, "Allocations failed\n");
return 1;
}
config_sources(&conf.srcs, &conf.nsrcs, conf.nx, conf.ny, conf.nsteps);
TYPE **srcs = sample_sources(conf.srcs, conf.nsrcs, conf.nsteps, dt);
init_data(curr, next, vsq, c_coeff, dimx, dimy, dimx * sizeof(TYPE), dx, dt);
TYPE *d_curr, *d_next, *d_vsq, *d_c_coeff;
CHECK(cudaMalloc((void **)&d_curr, nbytes));
CHECK(cudaMalloc((void **)&d_next, nbytes));
CHECK(cudaMalloc((void **)&d_vsq, nbytes));
CHECK(cudaMalloc((void **)&d_c_coeff, NUM_COEFF * sizeof(TYPE)));
dim3 block(BDIMX, BDIMY);
dim3 grid(conf.nx / block.x, conf.ny / block.y);
double mem_start = seconds();
CHECK(cudaMemcpy(d_curr, curr, nbytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_next, next, nbytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_vsq, vsq, nbytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_c_coeff, c_coeff, NUM_COEFF * sizeof(TYPE),
cudaMemcpyHostToDevice));
double start = seconds();
for (int step = 0; step < conf.nsteps; step++) {
for (int src = 0; src < conf.nsrcs; src++) {
if (conf.srcs[src].t > step) continue;
int src_offset = POINT_OFFSET(conf.srcs[src].x, conf.srcs[src].y,
dimx, conf.radius);
CHECK(cudaMemcpy(d_curr + src_offset, srcs[src] + step,
sizeof(TYPE), cudaMemcpyHostToDevice));
}
fwd_kernel<<<grid, block>>>(d_next, d_curr, d_vsq, d_c_coeff,
conf.nx, conf.ny, dimx, conf.radius);
TYPE *tmp = d_next;
d_next = d_curr;
d_curr = tmp;
update_progress(step + 1);
}
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
double compute_s = seconds() - start;
CHECK(cudaMemcpy(curr, d_curr, nbytes, cudaMemcpyDeviceToHost));
double total_s = seconds() - mem_start;
float point_rate = (float)conf.nx * conf.ny / (compute_s / conf.nsteps);
printf("iso_r4_2x: %8.10f s total, %8.10f s/step, %8.2f Mcells/s/step\n",
total_s, compute_s / conf.nsteps, point_rate / 1000000.f);
if (conf.save_text) {
save_text(curr, dimx, dimy, conf.ny, conf.nx, "snap.text", conf.radius);
}
free(curr);
free(next);
free(vsq);
for (int i = 0; i < conf.nsrcs; i++) {
free(srcs[i]);
}
free(srcs);
CHECK(cudaFree(d_curr));
CHECK(cudaFree(d_next));
CHECK(cudaFree(d_vsq));
CHECK(cudaFree(d_c_coeff));
return 0;
}
|
87e26652a35d92b20fe248697ca516785cac8720.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Implementing Radix sort in CUDA.
*/
#include <stdio.h>
#include <stdlib.h>
#define NUM_ELEMENTS 16
__device__ void partition_by_bit(unsigned int* values, unsigned int bit);
__global__ void radix_sort(unsigned int* d_array){
for(int bit = 0; bit < 32; bit++){
partition_by_bit(d_array, bit);
__syncthreads();
}
}
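// Hillis-Steele inclusive prefix sum over the bit flags, done in place by the
// threads of the single block. Each thread returns the inclusive sum up to and
// including its own element; the last element ends up holding the total.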
__device__ unsigned int plus_scan(unsigned int* bit_array){
unsigned int idx = threadIdx.x;
unsigned int size = blockDim.x;
for(int offset = 1; offset < size; offset *= 2){
unsigned int array_offset;
if (idx >= offset){
array_offset = bit_array[idx - offset];
}
__syncthreads();
if(idx >= offset){
bit_array[idx] = array_offset + bit_array[idx];
}
__syncthreads();
}
return bit_array[idx];
}
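// Stable split on one bit: elements whose current bit is 0 keep their relative
// order at the front of the array and elements with the bit set follow. The bit
// flags are written into `values`, scanned with plus_scan, and each thread then
// scatters its saved element x_i to its new position. This assumes the whole
// array fits in a single thread block, as launched from main().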
__device__ void partition_by_bit(unsigned int* values, unsigned int bit){
unsigned int idx = threadIdx.x;
unsigned int size = blockDim.x;
unsigned int x_i = values[idx];
unsigned int p_i = (x_i >> bit) & 1;
values[idx] = p_i;
__syncthreads();
unsigned int scan_val = plus_scan(values);
unsigned int total = size - values[size - 1];
__syncthreads();
if (p_i){
values[scan_val - 1 + total] = x_i;
}else{
values[idx - scan_val] = x_i;
}
}
int main(){
const unsigned int BYTES = NUM_ELEMENTS*sizeof(int);
unsigned int h_in [NUM_ELEMENTS];
unsigned int h_out [NUM_ELEMENTS];
for(int i = 0; i < NUM_ELEMENTS; i++){
h_in[i] = rand() % 100; // Generating random numbers between 0 and 99
}
unsigned int* d_array;
hipMalloc((void **) &d_array, BYTES);
hipMemcpy(d_array, h_in, BYTES, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( radix_sort), dim3(1), dim3(NUM_ELEMENTS), 0, 0, d_array);
hipMemcpy(h_out, d_array, BYTES, hipMemcpyDeviceToHost);
printf("Unsorted: \n");
for(int i = 0; i < NUM_ELEMENTS; i++){
printf("%d ", h_in[i]);
}
printf("\n");
printf("Sorted: \n");
for(int i = 0; i < NUM_ELEMENTS; i++){
printf("%d ", h_out[i]);
}
hipFree(d_array);
return 0;
}
| 87e26652a35d92b20fe248697ca516785cac8720.cu | /*
Implementing Radix sort in CUDA.
*/
#include <stdio.h>
#include <stdlib.h>
#define NUM_ELEMENTS 16
__device__ void partition_by_bit(unsigned int* values, unsigned int bit);
__global__ void radix_sort(unsigned int* d_array){
for(int bit = 0; bit < 32; bit++){
partition_by_bit(d_array, bit);
__syncthreads();
}
}
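// Hillis-Steele inclusive prefix sum over the bit flags, done in place by the
// threads of the single block. Each thread returns the inclusive sum up to and
// including its own element; the last element ends up holding the total.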
__device__ unsigned int plus_scan(unsigned int* bit_array){
unsigned int idx = threadIdx.x;
unsigned int size = blockDim.x;
for(int offset = 1; offset < size; offset *= 2){
unsigned int array_offset;
if (idx >= offset){
array_offset = bit_array[idx - offset];
}
__syncthreads();
if(idx >= offset){
bit_array[idx] = array_offset + bit_array[idx];
}
__syncthreads();
}
return bit_array[idx];
}
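// Stable split on one bit: elements whose current bit is 0 keep their relative
// order at the front of the array and elements with the bit set follow. The bit
// flags are written into `values`, scanned with plus_scan, and each thread then
// scatters its saved element x_i to its new position. This assumes the whole
// array fits in a single thread block, as launched from main().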
__device__ void partition_by_bit(unsigned int* values, unsigned int bit){
unsigned int idx = threadIdx.x;
unsigned int size = blockDim.x;
unsigned int x_i = values[idx];
unsigned int p_i = (x_i >> bit) & 1;
values[idx] = p_i;
__syncthreads();
unsigned int scan_val = plus_scan(values);
unsigned int total = size - values[size - 1];
__syncthreads();
if (p_i){
values[scan_val - 1 + total] = x_i;
}else{
values[idx - scan_val] = x_i;
}
}
int main(){
const unsigned int BYTES = NUM_ELEMENTS*sizeof(int);
unsigned int h_in [NUM_ELEMENTS];
unsigned int h_out [NUM_ELEMENTS];
for(int i = 0; i < NUM_ELEMENTS; i++){
h_in[i] = rand() % 100; // Generating random numbers between 0 and 99
}
unsigned int* d_array;
cudaMalloc((void **) &d_array, BYTES);
cudaMemcpy(d_array, h_in, BYTES, cudaMemcpyHostToDevice);
radix_sort<<<1, NUM_ELEMENTS>>>(d_array);
cudaMemcpy(h_out, d_array, BYTES, cudaMemcpyDeviceToHost);
printf("Unsorted: \n");
for(int i = 0; i < NUM_ELEMENTS; i++){
printf("%d ", h_in[i]);
}
printf("\n");
printf("Sorted: \n");
for(int i = 0; i < NUM_ELEMENTS; i++){
printf("%d ", h_out[i]);
}
cudaFree(d_array);
return 0;
}
|
a72c4a19f9dddadca3673c3269e67351c150e90d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/UpSample.cuh>
namespace at {
namespace native {
namespace {
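// Forward kernel: each thread handles one output position w2, maps it back to a
// (possibly fractional) source coordinate w1r, and writes the linear blend
// w0lambda * in[w1] + w1lambda * in[w1 + w1p] for every (batch, channel) pair.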
template <typename scalar_t, typename accscalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void upsample_linear1d_out_frame(
const int n,
const accscalar_t rwidth,
const bool align_corners,
const PackedTensorAccessor<scalar_t, 3> idata,
PackedTensorAccessor<scalar_t, 3> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int width1 = idata.size(2);
const int width2 = odata.size(2);
if (index < n) {
const int w2 = index % width2;
// special case: just copy
if (width1 == width2) {
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = idata[n][c][w1];
odata[n][c][w2] = val;
}
}
return;
}
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const accscalar_t val =
w0lambda * idata[n][c][w1] + w1lambda * idata[n][c][w1 + w1p];
odata[n][c][w2] = static_cast<scalar_t>(val);
}
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void upsample_linear1d_out_frame_backward(
const int n,
const accscalar_t rwidth,
const bool align_corners,
PackedTensorAccessor<scalar_t, 3> idata,
const PackedTensorAccessor<scalar_t, 3> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int width1 = idata.size(2);
const int width2 = odata.size(2);
if (index < n) {
const int w2 = index % width2;
// special case: just copy
if (width1 == width2) {
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = odata[n][c][w1];
idata[n][c][w2] = val;
}
}
return;
}
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t d2val = odata[n][c][w2];
atomicAdd(&idata[n][c][w1], static_cast<scalar_t>(w0lambda * d2val));
atomicAdd(
&idata[n][c][w1 + w1p], static_cast<scalar_t>(w1lambda * d2val));
}
}
}
}
static void upsample_linear1d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_linear1d_out_cuda", {input_arg, output_arg});
AT_CHECK(
output_size.size() == 1,
"It is expected output_size equals to 1, but got size ",
output_size.size());
int output_width = output_size[0];
int nbatch = input.size(0);
int channels = input.size(1);
int input_width = input.size(2);
upsample_1d_shape_check(
input, Tensor(), nbatch, channels, input_width, output_width);
output.resize_({input.size(0), input.size(1), output_width});
output.zero_();
AT_ASSERT(input_width > 0 && output_width > 0);
const int num_kernels = output_width;
const int num_threads =
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_linear1d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor<scalar_t, 3>();
auto odata = output.packed_accessor<scalar_t, 3>();
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners);
hipLaunchKernelGGL(( upsample_linear1d_out_frame<scalar_t, accscalar_t>)
, dim3(cuda::ATenCeilDiv(num_kernels, num_threads)),
dim3(num_threads),
0,
stream, num_kernels, rwidth, align_corners, idata, odata);
});
AT_CUDA_CHECK(hipGetLastError());
}
static void upsample_linear1d_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners) {
TensorArg grad_output_arg{grad_output_, "grad_output_", 1},
grad_input_arg{grad_input, "grad_input", 2};
checkAllSameGPU(
"upsample_linear1d_backward_out_cuda", {grad_output_arg, grad_input_arg});
AT_CHECK(
output_size.size() == 1,
"It is expected output_size equals to 1, but got size ",
output_size.size());
AT_CHECK(
input_size.size() == 3,
"It is expected input_size equals to 3, but got size ",
input_size.size());
int output_width = output_size[0];
int nbatch = input_size[0];
int channels = input_size[1];
int input_width = input_size[2];
upsample_1d_shape_check(
Tensor(), grad_output_, nbatch, channels, input_width, output_width);
Tensor grad_output = grad_output_.contiguous();
grad_input.resize_({nbatch, channels, input_width});
grad_input.zero_();
const int num_kernels = output_width;
const int num_threads =
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_linear1d_out_frame_backward", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.packed_accessor<scalar_t, 3>();
auto odata = grad_output.packed_accessor<scalar_t, 3>();
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners);
hipLaunchKernelGGL(( upsample_linear1d_out_frame_backward<scalar_t, accscalar_t>)
, dim3(cuda::ATenCeilDiv(num_kernels, num_threads)),
dim3(num_threads),
0,
stream, num_kernels, rwidth, align_corners, idata, odata);
});
AT_CUDA_CHECK(hipGetLastError());
}
} // namespace
Tensor& upsample_linear1d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners) {
upsample_linear1d_out_cuda_template(
output, input, output_size, align_corners);
return output;
}
Tensor upsample_linear1d_cuda(
const Tensor& input,
IntArrayRef output_size,
bool align_corners) {
Tensor output = at::empty_like(input);
upsample_linear1d_out_cuda_template(
output, input, output_size, align_corners);
return output;
}
Tensor& upsample_linear1d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners) {
upsample_linear1d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners);
return grad_input;
}
Tensor upsample_linear1d_backward_cuda(
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners) {
Tensor grad_input = at::empty_like(grad_output);
upsample_linear1d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners);
return grad_input;
}
} // namespace native
} // namespace at
| a72c4a19f9dddadca3673c3269e67351c150e90d.cu | // Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/UpSample.cuh>
namespace at {
namespace native {
namespace {
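// Forward kernel: each thread handles one output position w2, maps it back to a
// (possibly fractional) source coordinate w1r, and writes the linear blend
// w0lambda * in[w1] + w1lambda * in[w1 + w1p] for every (batch, channel) pair.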
template <typename scalar_t, typename accscalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void upsample_linear1d_out_frame(
const int n,
const accscalar_t rwidth,
const bool align_corners,
const PackedTensorAccessor<scalar_t, 3> idata,
PackedTensorAccessor<scalar_t, 3> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int width1 = idata.size(2);
const int width2 = odata.size(2);
if (index < n) {
const int w2 = index % width2;
// special case: just copy
if (width1 == width2) {
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = idata[n][c][w1];
odata[n][c][w2] = val;
}
}
return;
}
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const accscalar_t val =
w0lambda * idata[n][c][w1] + w1lambda * idata[n][c][w1 + w1p];
odata[n][c][w2] = static_cast<scalar_t>(val);
}
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void upsample_linear1d_out_frame_backward(
const int n,
const accscalar_t rwidth,
const bool align_corners,
PackedTensorAccessor<scalar_t, 3> idata,
const PackedTensorAccessor<scalar_t, 3> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int width1 = idata.size(2);
const int width2 = odata.size(2);
if (index < n) {
const int w2 = index % width2;
// special case: just copy
if (width1 == width2) {
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = odata[n][c][w1];
idata[n][c][w2] = val;
}
}
return;
}
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t d2val = odata[n][c][w2];
atomicAdd(&idata[n][c][w1], static_cast<scalar_t>(w0lambda * d2val));
atomicAdd(
&idata[n][c][w1 + w1p], static_cast<scalar_t>(w1lambda * d2val));
}
}
}
}
static void upsample_linear1d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_linear1d_out_cuda", {input_arg, output_arg});
AT_CHECK(
output_size.size() == 1,
"It is expected output_size equals to 1, but got size ",
output_size.size());
int output_width = output_size[0];
int nbatch = input.size(0);
int channels = input.size(1);
int input_width = input.size(2);
upsample_1d_shape_check(
input, Tensor(), nbatch, channels, input_width, output_width);
output.resize_({input.size(0), input.size(1), output_width});
output.zero_();
AT_ASSERT(input_width > 0 && output_width > 0);
const int num_kernels = output_width;
const int num_threads =
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_linear1d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor<scalar_t, 3>();
auto odata = output.packed_accessor<scalar_t, 3>();
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners);
upsample_linear1d_out_frame<scalar_t, accscalar_t>
<<<cuda::ATenCeilDiv(num_kernels, num_threads),
num_threads,
0,
stream>>>(num_kernels, rwidth, align_corners, idata, odata);
});
AT_CUDA_CHECK(cudaGetLastError());
}
static void upsample_linear1d_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners) {
TensorArg grad_output_arg{grad_output_, "grad_output_", 1},
grad_input_arg{grad_input, "grad_input", 2};
checkAllSameGPU(
"upsample_linear1d_backward_out_cuda", {grad_output_arg, grad_input_arg});
AT_CHECK(
output_size.size() == 1,
"It is expected output_size equals to 1, but got size ",
output_size.size());
AT_CHECK(
input_size.size() == 3,
"It is expected input_size equals to 3, but got size ",
input_size.size());
int output_width = output_size[0];
int nbatch = input_size[0];
int channels = input_size[1];
int input_width = input_size[2];
upsample_1d_shape_check(
Tensor(), grad_output_, nbatch, channels, input_width, output_width);
Tensor grad_output = grad_output_.contiguous();
grad_input.resize_({nbatch, channels, input_width});
grad_input.zero_();
const int num_kernels = output_width;
const int num_threads =
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_linear1d_out_frame_backward", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.packed_accessor<scalar_t, 3>();
auto odata = grad_output.packed_accessor<scalar_t, 3>();
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners);
upsample_linear1d_out_frame_backward<scalar_t, accscalar_t>
<<<cuda::ATenCeilDiv(num_kernels, num_threads),
num_threads,
0,
stream>>>(num_kernels, rwidth, align_corners, idata, odata);
});
AT_CUDA_CHECK(cudaGetLastError());
}
} // namespace
Tensor& upsample_linear1d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners) {
upsample_linear1d_out_cuda_template(
output, input, output_size, align_corners);
return output;
}
Tensor upsample_linear1d_cuda(
const Tensor& input,
IntArrayRef output_size,
bool align_corners) {
Tensor output = at::empty_like(input);
upsample_linear1d_out_cuda_template(
output, input, output_size, align_corners);
return output;
}
Tensor& upsample_linear1d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners) {
upsample_linear1d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners);
return grad_input;
}
Tensor upsample_linear1d_backward_cuda(
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners) {
Tensor grad_input = at::empty_like(grad_output);
upsample_linear1d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners);
return grad_input;
}
} // namespace native
} // namespace at
|
37e0c0c8ac9e2177f9c750caea88a9a13187baa6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 6
//Poisson Blending
/* Background
==========
The goal for this assignment is to take one image (the source) and
paste it into another image (the destination) attempting to match the
two images so that the pasting is non-obvious. This is
known as a "seamless clone".
The basic ideas are as follows:
1) Figure out the interior and border of the source image
2) Use the values of the border pixels in the destination image
as boundary conditions for solving a Poisson equation that tells
us how to blend the images.
No pixels from the destination except pixels on the border
are used to compute the match.
Solving the Poisson Equation
============================
There are multiple ways to solve this equation - we choose an iterative
method - specifically the Jacobi method. Iterative methods start with
a guess of the solution and then iterate to try and improve the guess
until it stops changing. If the problem was well-suited for the method
then it will stop and where it stops will be the solution.
The Jacobi method is the simplest iterative method and converges slowly -
that is we need a lot of iterations to get to the answer, but it is the
easiest method to write.
Jacobi Iterations
=================
Our initial guess is going to be the source image itself. This is a pretty
good guess for what the blended image will look like and it means that
we won't have to do as many iterations compared to if we had started far
from the final solution.
ImageGuess_prev (Floating point)
ImageGuess_next (Floating point)
DestinationImg
SourceImg
Follow these steps to implement one iteration:
1) For every pixel p in the interior, compute two sums over the four neighboring pixels:
Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor]
else if the neighbor in on the border then += DestinationImg[neighbor]
Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors)
2) Calculate the new pixel value:
float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT
ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255]
In this assignment we will do 800 iterations.
*/
#include "utils.h"
#include <algorithm>
// Size of the block
const int K = 16;
// Size of the shared Memeory
const int SK = K * K;
// get 2d position from block
__device__
int2 get2dPos() {
return make_int2(
blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y
);
}
// check whether a a value is within the image bounds
__device__
bool withinBounds(const int x, const int y, const size_t numRowsSource, const size_t numColsSource) {
return ((x < numColsSource) && (y < numRowsSource));
}
__device__
bool masked(uchar4 val) {
return (val.x != 255 || val.y != 255 || val.z != 255);
}
__device__
int getm(int x, int y, size_t numColsSource) {
return y*numColsSource + x;
}
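// Classifies every masked pixel (anything that is not pure white) as either
// interior (all of its in-bounds 4-neighbours are masked too) or border
// (masked, with at least one masked and at least one unmasked neighbour),
// writing 0/1 predicates that the Jacobi kernel uses to pick boundary values.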
__global__
void maskPredicateKernel(
const uchar4* const d_sourceImg,
int* d_borderPredicate,
int* d_interiorPredicate,
const size_t numRowsSource,
const size_t numColsSource) {
const int2 p = get2dPos();
const int m = getm(p.x, p.y, numColsSource);
if(!withinBounds(p.x, p.y, numRowsSource, numColsSource))
return;
// run through each pixel and determine if it's
// on the border, or if it's in the interior of the mask
if(masked(d_sourceImg[m])) {
int inbounds = 0;
int interior = 0;
// count how many of our neighbors are masked,
// and how many neighbors we have
if (withinBounds(p.x, p.y+1, numRowsSource, numColsSource)) {
inbounds++;
if(masked(d_sourceImg[getm(p.x, p.y+1, numColsSource)]))
interior++;
}
if (withinBounds(p.x, p.y-1, numRowsSource, numColsSource)) {
inbounds++;
if(masked(d_sourceImg[getm(p.x, p.y-1, numColsSource)]))
interior++;
}
if (withinBounds(p.x+1, p.y, numRowsSource, numColsSource)) {
inbounds++;
if(masked(d_sourceImg[getm(p.x+1, p.y, numColsSource)]))
interior++;
}
if (withinBounds(p.x-1, p.y, numRowsSource, numColsSource)) {
inbounds++;
if(masked(d_sourceImg[getm(p.x-1, p.y, numColsSource)]))
interior++;
}
// clear out the values so we don't
// have to memset this destination stuff
d_interiorPredicate[m] = 0;
d_borderPredicate[m] = 0;
// if all our neighbors are masked, then it's interior
if(inbounds == interior) {
d_interiorPredicate[m] = 1;
} else if (interior > 0) {
d_borderPredicate[m] = 1;
}
}
}
__global__
void separateChannelsKernel(
const uchar4* const inputImageRGBA,
float* const redChannel,
float* const greenChannel,
float* const blueChannel,
size_t numRows,
size_t numCols)
{
const int2 p = get2dPos();
const int m = getm(p.x, p.y, numCols);
const int tid = threadIdx.x + threadIdx.y * blockDim.y;
if(!withinBounds(p.x, p.y, numRows, numCols))
return;
__shared__ uchar4 s_data[SK];
s_data[tid] = inputImageRGBA[m];
redChannel[m] = (float)s_data[tid].x;
greenChannel[m] = (float)s_data[tid].y;
blueChannel[m] = (float)s_data[tid].z;
}
__global__
void recombineChannelsKernel(
uchar4* outputImageRGBA,
float* const redChannel,
float* const greenChannel,
float* const blueChannel,
size_t numRows,
size_t numCols)
{
const int2 p = get2dPos();
const int m = getm(p.x, p.y, numCols);
if(!withinBounds(p.x, p.y, numRows, numCols))
return;
outputImageRGBA[m].x = (char)redChannel[m];
outputImageRGBA[m].y = (char)greenChannel[m];
outputImageRGBA[m].z = (char)blueChannel[m];
}
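// One Jacobi step per pixel, matching the description at the top of the file:
// for interior pixels `a` gathers the current guess at interior neighbours and
// `b` the destination values at border neighbours (together Sum1), `c` is Sum2
// (the source-image differences), and `d` counts the in-bounds neighbours used
// as the divisor. Non-interior pixels are simply copied from the destination.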
__global__
void jacobiKernel(
float* d_in,
float* d_out,
const int* d_borderPredicate,
const int* d_interiorPredicate,
float* d_source,
float* d_dest,
size_t numRows,
size_t numCols)
{
const int2 p = get2dPos();
const int m = getm(p.x, p.y, numCols);
if(!withinBounds(p.x, p.y, numRows, numCols))
return;
// calculate these values as indicated in the videos
int lm;
if(d_interiorPredicate[m]==1) {
float a = 0.f, b=0.f, c=0.f, d=0.f;
float sourceVal = d_source[m];
if(withinBounds(p.x, p.y+1, numRows, numCols)) {
d++;
lm = getm(p.x, p.y+1, numCols);
if(d_interiorPredicate[lm]==1) {
a += d_in[lm];
} else if(d_borderPredicate[lm]==1) {
b += d_dest[lm];
}
c += (sourceVal-d_source[lm]);
}
if(withinBounds(p.x, p.y-1, numRows, numCols)) {
d++;
lm = getm(p.x, p.y-1, numCols);
if(d_interiorPredicate[lm]==1) {
a += d_in[lm];
} else if(d_borderPredicate[lm]==1) {
b += d_dest[lm];
}
c += (sourceVal-d_source[lm]);
}
if(withinBounds(p.x+1, p.y, numRows, numCols)) {
d++;
lm = getm(p.x+1, p.y, numCols);
if(d_interiorPredicate[lm]==1) {
a += d_in[lm];
} else if(d_borderPredicate[lm]==1) {
b += d_dest[lm];
}
c += (sourceVal-d_source[lm]);
}
if(withinBounds(p.x-1, p.y, numRows, numCols)) {
d++;
lm = getm(p.x-1, p.y, numCols);
if(d_interiorPredicate[lm]==1) {
a += d_in[lm];
} else if(d_borderPredicate[lm]==1) {
b += d_dest[lm];
}
c += (sourceVal-d_source[lm]);
}
d_out[m] = min(255.f, max(0.0, (a + b + c)/d));
} else {
d_out[m] = d_dest[m];
}
}
void your_blend(const uchar4* const h_sourceImg, //IN
const size_t numRowsSource, const size_t numColsSource,
const uchar4* const h_destImg, //IN
uchar4* const h_blendedImg) //OUT
{
// first push the dest and source onto the gpu
size_t imageSize = numRowsSource*numColsSource*sizeof(uchar4);
uchar4* d_sourceImg;
uchar4* d_destImg;
uchar4* d_finalImg;
// launch on the stream
hipStream_t s[3];
checkCudaErrors(hipMalloc(&d_sourceImg, imageSize));
checkCudaErrors(hipMalloc(&d_destImg, imageSize));
checkCudaErrors(hipMalloc(&d_finalImg, imageSize));
// Create the stream
for (int i = 0; i < 2; ++i) {
hipStreamCreate(&s[i]);
}
checkCudaErrors(hipMemcpyAsync(d_sourceImg, h_sourceImg, imageSize, hipMemcpyHostToDevice, s[0]));
checkCudaErrors(hipMemcpyAsync(d_destImg, h_destImg, imageSize, hipMemcpyHostToDevice, s[1]));
// Destroy the stream
for (int i = 0; i < 2; i++) {
hipStreamDestroy(s[i]);
}
// allocate predicate stuff
size_t predicateSize = numRowsSource*numColsSource*sizeof(int);
int* d_borderPredicate;
int* d_interiorPredicate;
checkCudaErrors(hipMalloc(&d_borderPredicate, predicateSize));
checkCudaErrors(hipMalloc(&d_interiorPredicate, predicateSize));
// make reusable dims
const dim3 blockSize(K, K);
const dim3 gridSize(numColsSource/blockSize.x + 1, numRowsSource/blockSize.y + 1);
/**
1) Compute a mask of the pixels from the source image to be copied
The pixels that shouldn't be copied are completely white, they
have R=255, G=255, B=255. Any other pixels SHOULD be copied.
**/
/**
2) Compute the interior and border regions of the mask. An interior
pixel has all 4 neighbors also inside the mask. A border pixel is
in the mask itself, but has at least one neighbor that isn't.
**/
// generate the predicates
hipLaunchKernelGGL(( maskPredicateKernel), dim3(gridSize), dim3(blockSize), 0, 0,
d_sourceImg,
d_borderPredicate,
d_interiorPredicate,
numRowsSource,
numColsSource
);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
/**
3) Separate out the incoming image into three separate channels
**/
size_t floatSize = numRowsSource*numColsSource*sizeof(float);
float *d_sourceImgR, *d_sourceImgG, *d_sourceImgB;
float *d_destImgR, *d_destImgG, *d_destImgB;
checkCudaErrors(hipMalloc(&d_sourceImgR, floatSize));
checkCudaErrors(hipMalloc(&d_sourceImgG, floatSize));
checkCudaErrors(hipMalloc(&d_sourceImgB, floatSize));
checkCudaErrors(hipMalloc(&d_destImgR, floatSize));
checkCudaErrors(hipMalloc(&d_destImgG, floatSize));
checkCudaErrors(hipMalloc(&d_destImgB, floatSize));
for (int i = 0; i < 2; ++i) {
hipStreamCreate(&s[i]);
}
hipLaunchKernelGGL(( separateChannelsKernel), dim3(gridSize), dim3(blockSize), 0, s[0],
d_sourceImg,
d_sourceImgR,
d_sourceImgG,
d_sourceImgB,
numRowsSource,
numColsSource);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( separateChannelsKernel), dim3(gridSize), dim3(blockSize), 0, s[1],
d_destImg,
d_destImgR,
d_destImgG,
d_destImgB,
numRowsSource,
numColsSource);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
for (int i = 0; i < 2; i++) {
hipStreamDestroy(s[i]);
}
/**
4) Create two float(!) buffers for each color channel that will
act as our guesses. Initialize them to the respective color
channel of the source image since that will act as our initial guess.
**/
// allocate floats
float *d_r0, *d_r1, *d_g0, *d_g1, *d_b0, *d_b1;
checkCudaErrors(hipMalloc(&d_r0, floatSize));
checkCudaErrors(hipMalloc(&d_r1, floatSize));
checkCudaErrors(hipMalloc(&d_b0, floatSize));
checkCudaErrors(hipMalloc(&d_b1, floatSize));
checkCudaErrors(hipMalloc(&d_g0, floatSize));
checkCudaErrors(hipMalloc(&d_g1, floatSize));
for (int i = 0 ; i < 3; i++) {
hipStreamCreate(&s[i]);
}
checkCudaErrors(hipMemcpyAsync(d_r0, d_sourceImgR, floatSize, hipMemcpyDeviceToDevice, s[0]));
checkCudaErrors(hipMemcpyAsync(d_g0, d_sourceImgG, floatSize, hipMemcpyDeviceToDevice, s[1]));
checkCudaErrors(hipMemcpyAsync(d_b0, d_sourceImgB, floatSize, hipMemcpyDeviceToDevice, s[2]));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
/**
5) For each color channel perform the Jacobi iteration described
above 800 times.
**/
for(int i = 0; i < 800; i++) {
hipLaunchKernelGGL(( jacobiKernel), dim3(gridSize), dim3(blockSize), 0, s[0],
d_r0,
d_r1,
d_borderPredicate,
d_interiorPredicate,
d_sourceImgR,
d_destImgR,
numRowsSource,
numColsSource
);
std::swap(d_r0, d_r1);
hipLaunchKernelGGL(( jacobiKernel), dim3(gridSize), dim3(blockSize), 0, s[1],
d_g0,
d_g1,
d_borderPredicate,
d_interiorPredicate,
d_sourceImgG,
d_destImgG,
numRowsSource,
numColsSource
);
std::swap(d_g0, d_g1);
hipLaunchKernelGGL(( jacobiKernel), dim3(gridSize), dim3(blockSize), 0, s[2],
d_b0,
d_b1,
d_borderPredicate,
d_interiorPredicate,
d_sourceImgB,
d_destImgB,
numRowsSource,
numColsSource
);
std::swap(d_b0, d_b1);
}
for (int i = 0; i < 3; i++) {
hipStreamDestroy(s[i]);
}
/**
6) Create the output image by replacing all the interior pixels
in the destination image with the result of the Jacobi iterations.
Just cast the floating point values to unsigned chars since we have
already made sure to clamp them to the correct range.
**/
// lets assume that d_r0, d_g0, d_b0 are the final pass
hipLaunchKernelGGL(( recombineChannelsKernel), dim3(gridSize), dim3(blockSize), 0, 0,
d_finalImg,
d_r0,
d_g0,
d_b0,
numRowsSource,
numColsSource);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// copy device final image to host
checkCudaErrors(hipMemcpy(h_blendedImg, d_finalImg, imageSize, hipMemcpyDeviceToHost));
// cleanup
checkCudaErrors(hipFree(d_sourceImg));
checkCudaErrors(hipFree(d_destImg));
checkCudaErrors(hipFree(d_finalImg));
checkCudaErrors(hipFree(d_borderPredicate));
checkCudaErrors(hipFree(d_interiorPredicate));
checkCudaErrors(hipFree(d_sourceImgR));
checkCudaErrors(hipFree(d_sourceImgG));
checkCudaErrors(hipFree(d_sourceImgB));
checkCudaErrors(hipFree(d_destImgR));
checkCudaErrors(hipFree(d_destImgG));
checkCudaErrors(hipFree(d_destImgB));
checkCudaErrors(hipFree(d_r0));
checkCudaErrors(hipFree(d_r1));
checkCudaErrors(hipFree(d_g0));
checkCudaErrors(hipFree(d_g1));
checkCudaErrors(hipFree(d_b0));
checkCudaErrors(hipFree(d_b1));
}
| 37e0c0c8ac9e2177f9c750caea88a9a13187baa6.cu | //Udacity HW 6
//Poisson Blending
/* Background
==========
The goal for this assignment is to take one image (the source) and
paste it into another image (the destination) attempting to match the
two images so that the pasting is non-obvious. This is
known as a "seamless clone".
The basic ideas are as follows:
1) Figure out the interior and border of the source image
2) Use the values of the border pixels in the destination image
as boundary conditions for solving a Poisson equation that tells
us how to blend the images.
No pixels from the destination except pixels on the border
are used to compute the match.
Solving the Poisson Equation
============================
There are multiple ways to solve this equation - we choose an iterative
method - specifically the Jacobi method. Iterative methods start with
a guess of the solution and then iterate to try and improve the guess
until it stops changing. If the problem was well-suited for the method
then it will stop and where it stops will be the solution.
The Jacobi method is the simplest iterative method and converges slowly -
that is we need a lot of iterations to get to the answer, but it is the
easiest method to write.
Jacobi Iterations
=================
Our initial guess is going to be the source image itself. This is a pretty
good guess for what the blended image will look like and it means that
we won't have to do as many iterations compared to if we had started far
from the final solution.
ImageGuess_prev (Floating point)
ImageGuess_next (Floating point)
DestinationImg
SourceImg
Follow these steps to implement one iteration:
1) For every pixel p in the interior, compute two sums over the four neighboring pixels:
Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor]
else if the neighbor in on the border then += DestinationImg[neighbor]
Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors)
2) Calculate the new pixel value:
float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT
ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255]
In this assignment we will do 800 iterations.
*/
#include "utils.h"
#include <algorithm>
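// A minimal host-side sketch of the per-pixel update described in the comment block above,
// assuming sum1 and sum2 have already been accumulated over the four neighbors as in step 1;
// the name jacobiUpdateReference is hypothetical and nothing below calls it.
static inline float jacobiUpdateReference(float sum1, float sum2) {
  float newVal = (sum1 + sum2) / 4.f;               // floating-point average
  return std::min(255.f, std::max(0.f, newVal));    // clamp to [0, 255]
}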
// Size of the block
const int K = 16;
// Size of the shared Memeory
const int SK = K * K;
// get 2d position from block
__device__
int2 get2dPos() {
return make_int2(
blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y
);
}
// check whether a value is within the image bounds
__device__
bool withinBounds(const int x, const int y, const size_t numRowsSource, const size_t numColsSource) {
return ((x < numColsSource) && (y < numRowsSource));
}
__device__
bool masked(uchar4 val) {
return (val.x != 255 || val.y != 255 || val.z != 255);
}
__device__
int getm(int x, int y, size_t numColsSource) {
return y*numColsSource + x;
}
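// Classify every masked pixel of the source image: a pixel is interior if all of its
// in-bounds neighbors are also masked, and a border pixel if it is masked but has at
// least one unmasked neighbor; entries for all other in-bounds pixels are cleared to zero.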
__global__
void maskPredicateKernel(
const uchar4* const d_sourceImg,
int* d_borderPredicate,
int* d_interiorPredicate,
const size_t numRowsSource,
const size_t numColsSource) {
const int2 p = get2dPos();
const int m = getm(p.x, p.y, numColsSource);
if(!withinBounds(p.x, p.y, numRowsSource, numColsSource))
return;
// clear the predicate values for every in-bounds pixel so we
// don't have to memset the predicate arrays
d_interiorPredicate[m] = 0;
d_borderPredicate[m] = 0;
// run through each pixel and determine if it's
// on the border or in the interior of the mask
if(masked(d_sourceImg[m])) {
int inbounds = 0;
int interior = 0;
// count how many of our neighbors are masked,
// and how many neighbors we have
if (withinBounds(p.x, p.y+1, numRowsSource, numColsSource)) {
inbounds++;
if(masked(d_sourceImg[getm(p.x, p.y+1, numColsSource)]))
interior++;
}
if (withinBounds(p.x, p.y-1, numRowsSource, numColsSource)) {
inbounds++;
if(masked(d_sourceImg[getm(p.x, p.y-1, numColsSource)]))
interior++;
}
if (withinBounds(p.x+1, p.y, numRowsSource, numColsSource)) {
inbounds++;
if(masked(d_sourceImg[getm(p.x+1, p.y, numColsSource)]))
interior++;
}
if (withinBounds(p.x-1, p.y, numRowsSource, numColsSource)) {
inbounds++;
if(masked(d_sourceImg[getm(p.x-1, p.y, numColsSource)]))
interior++;
}
// if all our in-bounds neighbors are masked, then it's an interior pixel
if(inbounds == interior) {
d_interiorPredicate[m] = 1;
} else if (interior > 0) {
d_borderPredicate[m] = 1;
}
}
}
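// Split the interleaved uchar4 image into three separate float channel buffers,
// staging each thread's pixel through shared memory.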
__global__
void separateChannelsKernel(
const uchar4* const inputImageRGBA,
float* const redChannel,
float* const greenChannel,
float* const blueChannel,
size_t numRows,
size_t numCols)
{
const int2 p = get2dPos();
const int m = getm(p.x, p.y, numCols);
const int tid = threadIdx.x + threadIdx.y * blockDim.x;
if(!withinBounds(p.x, p.y, numRows, numCols))
return;
__shared__ uchar4 s_data[SK];
s_data[tid] = inputImageRGBA[m];
redChannel[m] = (float)s_data[tid].x;
greenChannel[m] = (float)s_data[tid].y;
blueChannel[m] = (float)s_data[tid].z;
}
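// Pack the three float channel buffers back into the interleaved uchar4 output image.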
__global__
void recombineChannelsKernel(
uchar4* outputImageRGBA,
float* const redChannel,
float* const greenChannel,
float* const blueChannel,
size_t numRows,
size_t numCols)
{
const int2 p = get2dPos();
const int m = getm(p.x, p.y, numCols);
if(!withinBounds(p.x, p.y, numRows, numCols))
return;
outputImageRGBA[m].x = (unsigned char)redChannel[m];
outputImageRGBA[m].y = (unsigned char)greenChannel[m];
outputImageRGBA[m].z = (unsigned char)blueChannel[m];
}
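// One Jacobi sweep: for every interior pixel, accumulate the neighbor terms (previous
// guess for interior neighbors, destination value for border neighbors) plus the source
// gradient, average over the number of in-bounds neighbors and clamp to [0, 255];
// pixels outside the interior simply copy the destination image.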
__global__
void jacobiKernel(
float* d_in,
float* d_out,
const int* d_borderPredicate,
const int* d_interiorPredicate,
float* d_source,
float* d_dest,
size_t numRows,
size_t numCols)
{
const int2 p = get2dPos();
const int m = getm(p.x, p.y, numCols);
if(!withinBounds(p.x, p.y, numRows, numCols))
return;
// calculate these values as indicated in the videos
int lm;
if(d_interiorPredicate[m]==1) {
float a = 0.f, b=0.f, c=0.f, d=0.f;
float sourceVal = d_source[m];
if(withinBounds(p.x, p.y+1, numRows, numCols)) {
d++;
lm = getm(p.x, p.y+1, numCols);
if(d_interiorPredicate[lm]==1) {
a += d_in[lm];
} else if(d_borderPredicate[lm]==1) {
b += d_dest[lm];
}
c += (sourceVal-d_source[lm]);
}
if(withinBounds(p.x, p.y-1, numRows, numCols)) {
d++;
lm = getm(p.x, p.y-1, numCols);
if(d_interiorPredicate[lm]==1) {
a += d_in[lm];
} else if(d_borderPredicate[lm]==1) {
b += d_dest[lm];
}
c += (sourceVal-d_source[lm]);
}
if(withinBounds(p.x+1, p.y, numRows, numCols)) {
d++;
lm = getm(p.x+1, p.y, numCols);
if(d_interiorPredicate[lm]==1) {
a += d_in[lm];
} else if(d_borderPredicate[lm]==1) {
b += d_dest[lm];
}
c += (sourceVal-d_source[lm]);
}
if(withinBounds(p.x-1, p.y, numRows, numCols)) {
d++;
lm = getm(p.x-1, p.y, numCols);
if(d_interiorPredicate[lm]==1) {
a += d_in[lm];
} else if(d_borderPredicate[lm]==1) {
b += d_dest[lm];
}
c += (sourceVal-d_source[lm]);
}
d_out[m] = min(255.f, max(0.0, (a + b + c)/d));
} else {
d_out[m] = d_dest[m];
}
}
void your_blend(const uchar4* const h_sourceImg, //IN
const size_t numRowsSource, const size_t numColsSource,
const uchar4* const h_destImg, //IN
uchar4* const h_blendedImg) //OUT
{
// first push the dest and source onto the gpu
size_t imageSize = numRowsSource*numColsSource*sizeof(uchar4);
uchar4* d_sourceImg;
uchar4* d_destImg;
uchar4* d_finalImg;
// launch on the stream
cudaStream_t s[3];
checkCudaErrors(cudaMalloc(&d_sourceImg, imageSize));
checkCudaErrors(cudaMalloc(&d_destImg, imageSize));
checkCudaErrors(cudaMalloc(&d_finalImg, imageSize));
// Create the stream
for (int i = 0; i < 2; ++i) {
cudaStreamCreate(&s[i]);
}
checkCudaErrors(cudaMemcpyAsync(d_sourceImg, h_sourceImg, imageSize, cudaMemcpyHostToDevice, s[0]));
checkCudaErrors(cudaMemcpyAsync(d_destImg, h_destImg, imageSize, cudaMemcpyHostToDevice, s[1]));
// Destroy the stream
for (int i = 0; i < 2; i++) {
cudaStreamDestroy(s[i]);
}
// allocate predicate stuff
size_t predicateSize = numRowsSource*numColsSource*sizeof(int);
int* d_borderPredicate;
int* d_interiorPredicate;
checkCudaErrors(cudaMalloc(&d_borderPredicate, predicateSize));
checkCudaErrors(cudaMalloc(&d_interiorPredicate, predicateSize));
// make reusable dims
const dim3 blockSize(K, K);
const dim3 gridSize(numColsSource/blockSize.x + 1, numRowsSource/blockSize.y + 1);
/**
1) Compute a mask of the pixels from the source image to be copied
The pixels that shouldn't be copied are completely white, they
have R=255, G=255, B=255. Any other pixels SHOULD be copied.
**/
/**
2) Compute the interior and border regions of the mask. An interior
pixel has all 4 neighbors also inside the mask. A border pixel is
in the mask itself, but has at least one neighbor that isn't.
**/
// generate the predicates
maskPredicateKernel<<<gridSize, blockSize>>>(
d_sourceImg,
d_borderPredicate,
d_interiorPredicate,
numRowsSource,
numColsSource
);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
/**
3) Separate out the incoming image into three separate channels
**/
size_t floatSize = numRowsSource*numColsSource*sizeof(float);
float *d_sourceImgR, *d_sourceImgG, *d_sourceImgB;
float *d_destImgR, *d_destImgG, *d_destImgB;
checkCudaErrors(cudaMalloc(&d_sourceImgR, floatSize));
checkCudaErrors(cudaMalloc(&d_sourceImgG, floatSize));
checkCudaErrors(cudaMalloc(&d_sourceImgB, floatSize));
checkCudaErrors(cudaMalloc(&d_destImgR, floatSize));
checkCudaErrors(cudaMalloc(&d_destImgG, floatSize));
checkCudaErrors(cudaMalloc(&d_destImgB, floatSize));
for (int i = 0; i < 2; ++i) {
cudaStreamCreate(&s[i]);
}
separateChannelsKernel<<<gridSize, blockSize, 0, s[0]>>>(
d_sourceImg,
d_sourceImgR,
d_sourceImgG,
d_sourceImgB,
numRowsSource,
numColsSource);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
separateChannelsKernel<<<gridSize, blockSize, 0, s[1]>>>(
d_destImg,
d_destImgR,
d_destImgG,
d_destImgB,
numRowsSource,
numColsSource);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
for (int i = 0; i < 2; i++) {
cudaStreamDestroy(s[i]);
}
/**
4) Create two float(!) buffers for each color channel that will
act as our guesses. Initialize them to the respective color
channel of the source image since that will act as our initial guess.
**/
// allocate floats
float *d_r0, *d_r1, *d_g0, *d_g1, *d_b0, *d_b1;
checkCudaErrors(cudaMalloc(&d_r0, floatSize));
checkCudaErrors(cudaMalloc(&d_r1, floatSize));
checkCudaErrors(cudaMalloc(&d_b0, floatSize));
checkCudaErrors(cudaMalloc(&d_b1, floatSize));
checkCudaErrors(cudaMalloc(&d_g0, floatSize));
checkCudaErrors(cudaMalloc(&d_g1, floatSize));
for (int i = 0 ; i < 3; i++) {
cudaStreamCreate(&s[i]);
}
checkCudaErrors(cudaMemcpyAsync(d_r0, d_sourceImgR, floatSize, cudaMemcpyDeviceToDevice, s[0]));
checkCudaErrors(cudaMemcpyAsync(d_g0, d_sourceImgG, floatSize, cudaMemcpyDeviceToDevice, s[1]));
checkCudaErrors(cudaMemcpyAsync(d_b0, d_sourceImgB, floatSize, cudaMemcpyDeviceToDevice, s[2]));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
/**
5) For each color channel perform the Jacobi iteration described
above 800 times.
**/
for(int i = 0; i < 800; i++) {
jacobiKernel<<<gridSize, blockSize, 0, s[0]>>>(
d_r0,
d_r1,
d_borderPredicate,
d_interiorPredicate,
d_sourceImgR,
d_destImgR,
numRowsSource,
numColsSource
);
std::swap(d_r0, d_r1);
jacobiKernel<<<gridSize, blockSize, 0, s[1]>>>(
d_g0,
d_g1,
d_borderPredicate,
d_interiorPredicate,
d_sourceImgG,
d_destImgG,
numRowsSource,
numColsSource
);
std::swap(d_g0, d_g1);
jacobiKernel<<<gridSize, blockSize, 0, s[2]>>>(
d_b0,
d_b1,
d_borderPredicate,
d_interiorPredicate,
d_sourceImgB,
d_destImgB,
numRowsSource,
numColsSource
);
std::swap(d_b0, d_b1);
}
for (int i = 0; i < 3; i++) {
cudaStreamDestroy(s[i]);
}
/**
6) Create the output image by replacing all the interior pixels
in the destination image with the result of the Jacobi iterations.
Just cast the floating point values to unsigned chars since we have
already made sure to clamp them to the correct range.
**/
// lets assume that d_r0, d_g0, d_b0 are the final pass
recombineChannelsKernel<<<gridSize, blockSize>>>(
d_finalImg,
d_r0,
d_g0,
d_b0,
numRowsSource,
numColsSource);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// copy device final image to host
checkCudaErrors(cudaMemcpy(h_blendedImg, d_finalImg, imageSize, cudaMemcpyDeviceToHost));
// cleanup
checkCudaErrors(cudaFree(d_sourceImg));
checkCudaErrors(cudaFree(d_destImg));
checkCudaErrors(cudaFree(d_finalImg));
checkCudaErrors(cudaFree(d_borderPredicate));
checkCudaErrors(cudaFree(d_interiorPredicate));
checkCudaErrors(cudaFree(d_sourceImgR));
checkCudaErrors(cudaFree(d_sourceImgG));
checkCudaErrors(cudaFree(d_sourceImgB));
checkCudaErrors(cudaFree(d_destImgR));
checkCudaErrors(cudaFree(d_destImgG));
checkCudaErrors(cudaFree(d_destImgB));
checkCudaErrors(cudaFree(d_r0));
checkCudaErrors(cudaFree(d_r1));
checkCudaErrors(cudaFree(d_g0));
checkCudaErrors(cudaFree(d_g1));
checkCudaErrors(cudaFree(d_b0));
checkCudaErrors(cudaFree(d_b1));
}
|
5bd98518a72b921094da52f20d8918317fb363d2.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2016- Facebook, Inc (Adam Paszke)
// Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
// Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
// Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
// Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
// Copyright (c) 2011-2013 NYU (Clement Farabet)
// Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston) // NOLINT
// Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
// Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz) // NOLINT
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America // NOLINT
// and IDIAP Research Institute nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <unordered_map>
#include <mutex> // NOLINT
#include "eaconv/src/handle.h"
#include "eaconv/src/cuda_check.h"
#ifdef __cplusplus
extern "C" {
#endif
struct Handle {
cudnnHandle_t handle;
Handle() : handle(NULL) {
checkCUDNN(cudnnCreate(&handle));
}
~Handle() {
if (handle) {
cudnnDestroy(handle);
}
}
};
std::mutex mutex;
std::unordered_map<int, Handle> handles;
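// Return a cuDNN handle for the current device, creating it lazily on first use;
// the mutex serializes access to the per-device handle map.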
cudnnHandle_t getCudnnHandle(void) {
int device;
checkCUDA(hipGetDevice(&device));
std::lock_guard<std::mutex> guard(mutex);
return handles[device].handle;
}
#ifdef __cplusplus
}
#endif
| 5bd98518a72b921094da52f20d8918317fb363d2.cu | // Copyright (c) 2016- Facebook, Inc (Adam Paszke)
// Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
// Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
// Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
// Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
// Copyright (c) 2011-2013 NYU (Clement Farabet)
// Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston) // NOLINT
// Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
// Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz) // NOLINT
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America // NOLINT
// and IDIAP Research Institute nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <unordered_map>
#include <mutex> // NOLINT
#include "eaconv/src/handle.h"
#include "eaconv/src/cuda_check.h"
#ifdef __cplusplus
extern "C" {
#endif
struct Handle {
cudnnHandle_t handle;
Handle() : handle(NULL) {
checkCUDNN(cudnnCreate(&handle));
}
~Handle() {
if (handle) {
cudnnDestroy(handle);
}
}
};
std::mutex mutex;
std::unordered_map<int, Handle> handles;
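// Return a cuDNN handle for the current device, creating it lazily on first use;
// the mutex serializes access to the per-device handle map.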
cudnnHandle_t getCudnnHandle(void) {
int device;
checkCUDA(cudaGetDevice(&device));
std::lock_guard<std::mutex> guard(mutex);
return handles[device].handle;
}
#ifdef __cplusplus
}
#endif
|
c977ca608a65a936bb5ea40479d8dad07a1e75fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "fractal.h"
#include <hip/hip_runtime.h>
#include <hip/hip_complex.h>
using namespace std;
/*struct Complex {
double Re;
double Im;
Complex(double re, double im) {
this->Re = re;
this->Im = im;
}
};*/
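// Escape-time iteration for one pixel: store the iteration count and the final z value,
// and bump the iteration histogram (the increment is not atomic, so histogram counts are
// only approximate when this path is used).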
__global__ void mandelNum( int *counts, double *data, long *histogram, const int imgWidth, const int imgHeight, const double midX, const double midY,
const double scale, const int iterations, const bool julia, const double juliaX, const double juliaY ) {
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int i = index_y * imgWidth + index_x;
if( index_x < imgWidth && index_y < imgHeight ) {
int counter = 0;
double a, b, x, y, ax, ay;
if (imgWidth > imgHeight) {
ax = ( ( double )imgWidth ) / imgHeight;
ay = 1;
} else {
ax = 1;
ay = ( ( double )imgHeight ) / imgWidth;
}
a = midX + 2 * ax * scale * ( double )( 2 * index_x - imgWidth ) / imgWidth;
b = midY + 2 * ay * scale * ( double )( 2 * index_y - imgHeight ) / imgHeight;
if( julia ) {
x = juliaX;
y = juliaY;
} else {
x = a;
y = b;
}
double asq = a * a, bsq = b * b;
double atemp;
double r2 = 2 << 16;
//calculate mandelnumber
while( ( asq + bsq < r2 ) && ( counter < iterations ) ) {
atemp = asq - bsq + x;
b = a * b;
b += b + y;
a = atemp;
counter++;
asq = a * a;
bsq = b * b;
}
if (counter != 0)
histogram[counter]++;
counts[i] = counter;
data[2 * i] = a;
data[2 * i + 1] = b;
}
}
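// Histogram-equalization coloring: map each pixel's iteration count through the
// (cumulative) iteration histogram with smooth fractional-iteration interpolation,
// then look the hue up in the color ramp.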
__global__ void coloring( int *counts, double *data, long *histogram, GLubyte *array, const int imgWidth, const int imgHeight, const int depth, const int iterations ) {
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int i = index_y * imgWidth + index_x;
if( index_x < imgWidth && index_y < imgHeight ) {
int total = histogram[iterations];
int j = depth * i;
double hue = (double)histogram[counts[i]];
if( counts[i] < iterations ) {
double hue2 = (double)histogram[counts[i] + 1];
double x = data[2 * i];
double y = data[2 * i + 1];
double zn = x * x + y * y;
//from: COLORING DYNAMICAL SYSTEMSIN THE COMPLEX PLANE
//http://math.unipa.it/~grim/Jbarrallo.PDF
double nu = ( log( log( (double)(2 << 16) ) ) - log( (double)(0.5 * log( zn )) ) ) / log( 2. ); //lg(log(b)) - lg(log(sqrt(zn)))
hue += nu * (hue2 - hue);
}
hue /= total;
//colour scheme
GLubyte colorArray[] = { 0, 0, 0, 255, 0, 0, 255, 155, 0, 255, 255, 255, 0, 0, 0 };
int length = 5;
int n = (int)( hue * ( length - 1 ) );
double h = hue * ( length - 1 ) - n;
GLubyte r1 = colorArray[3 * n];
GLubyte g1 = colorArray[3 * n + 1];
GLubyte b1 = colorArray[3 * n + 2];
GLubyte r2 = colorArray[3 * n + 3];
GLubyte g2 = colorArray[3 * n + 4];
GLubyte b2 = colorArray[3 * n + 5];
GLubyte r = r1 * ( 1 - h ) + r2 * h;
GLubyte g = g1 * ( 1 - h ) + g2 * h;
GLubyte b = b1 * ( 1 - h ) + b2 * h;
array[j] = (GLubyte)r;
array[j + 1] = (GLubyte)g;
array[j + 2] = (GLubyte)b;
}
}
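// Escape-time iteration with exponential smoothing: accumulate exp(-|z|^2) every step
// and map the smoothed value through the color ramp; this is the variant actually
// launched by CalcFractal.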
__global__ void mandelNumExp( GLubyte *array, const int imgWidth, const int imgHeight, const int depth, const double midX, const double midY,
const double scale, const int iterations, const bool julia, const double juliaX, const double juliaY ) {
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int i = index_y * imgWidth + index_x;
if( index_x < imgWidth && index_y < imgHeight ) {
int counter = 0;
double a, b, x, y, ax, ay;
if (imgWidth > imgHeight) {
ax = ( ( double )imgWidth ) / imgHeight;
ay = 1;
} else {
ax = 1;
ay = ( ( double )imgHeight ) / imgWidth;
}
a = midX + 2 * ax * scale * ( double )( 2 * index_x - imgWidth ) / imgWidth;
b = midY + 2 * ay * scale * ( double )( 2 * index_y - imgHeight ) / imgHeight;
if( julia ) {
x = juliaX;
y = juliaY;
} else {
x = a;
y = b;
}
double asq = a * a, bsq = b * b;
double atemp, btemp;
double r = 4;
double e = 0;
//double c = -0.7198;
//double d = 0.9111;
double c = juliaX;
double d = juliaY;
double ctemp, dtemp;
//c = 0;
//d = 0;
//fractal formulas
//http://www.lifesmith.com/formulas.html
//calculate mandelnumber
while( ( asq + bsq < r ) && ( counter < iterations ) ) {
/*atemp = asq - bsq + x + c;
b = a * b;
b += b + y + d;
a = atemp;*/
//mul(a, b, a, b, &a, &b);
//add(a, b, x, y, &a, &b);
//d_function[2 * i](a, b, a, b, &a, &b);
//d_function[2 * i + 1](a, b, x, y, &a, &b);
//f_mul(a, b, a, b, &a, &b);
//f_add(a, b, x, y, &a, &b);
mul(a, b, a, b, &a, &b);
add(a, b, x, y, &a, &b);
add(a, b, c, d, &a, &b);
/*mul(a, b, a, b, &a, &b);
mul(a, b, x, y, &a, &b);
add(a, b, c, d, &a, &b);*/
/*atemp = a * a * a - 3 * a * b * b + a * c - b * d + x;
b = 3 * a * a * b - b * b * b + a * d + b * c + y;
a = atemp;*/
/*mul(a, b, c, d, &ctemp, &dtemp);
//pow(a, b, 3, &a, &b);
mul(a, b, a, b, &atemp, &btemp);
mul(a, b, atemp, btemp, &a, &b);
add(a, b, ctemp, dtemp, &a, &b);
add(a, b, x, y, &a, &b);*/
counter++;
e += expf( - ( asq + bsq ) ); //do not use sqrt as it does not add much
asq = a * a;
bsq = b * b;
}
float hue;
if (counter == iterations) {
hue = 1;
} else {
hue = ( 0.025f * e - (int)(0.025f * e) );
}
GLubyte R, G, B;
//colour scheme
GLubyte colorArray[] = { 0, 0, 0, 255, 0, 0, 255, 155, 0, 255, 255, 255, 0, 0, 0 };
int length = 5;
int n = (int)( hue * ( length - 1 ) );
float h = hue * ( length - 1 ) - n;
GLubyte r1 = colorArray[3 * n];
GLubyte g1 = colorArray[3 * n + 1];
GLubyte b1 = colorArray[3 * n + 2];
GLubyte r2 = colorArray[3 * n + 3];
GLubyte g2 = colorArray[3 * n + 4];
GLubyte b2 = colorArray[3 * n + 5];
R = r1 * ( 1 - h ) + r2 * h;
G = g1 * ( 1 - h ) + g2 * h;
B = b1 * ( 1 - h ) + b2 * h;
/*double H = 1 - hue;
double L = hue;
double S = 1;
double C = (1 - abs( 2 * L - 1 ) ) * S;
double Hprime = 6 * H; //H should be in [0,360), and H' in [0,6), but H is in [0, 1), so we do this instead.
double X = C * (double)(1 - abs( Hprime - 2 * (int)( Hprime / 2 ) - 1 ) ); //C * (1 - |H' mod 2 - 1|)
double m = L - C / 2;
GLubyte bC, bX, b0;
bC = (GLubyte)( ( C + m ) * 255 );
bX = (GLubyte)( ( X + m ) * 255 );
b0 = (GLubyte)( ( 0 + m ) * 255 );
if (Hprime < 1) { R = bC; G = bX; B = b0; }
else if (Hprime < 2) { R = bX; G = bC; B = b0; }
else if (Hprime < 3) { R = b0; G = bC; B = bX; }
else if (Hprime < 4) { R = b0; G = bX; B = bC; }
else if (Hprime < 5) { R = bX; G = b0; B = bX; }
else if (Hprime < 6) { R = bC; G = b0; B = bC; }
else { R = 0; G = 0; B = 0; }*/
int j = depth * i;
array[j] = R;
array[j + 1] = G;
array[j + 2] = B;
}
}
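// In-place serial prefix sum over the iteration histogram, performed by global thread 0 only.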
__global__ void partialSum( long *input, const int length ) {
int id = threadIdx.x + blockDim.x * threadIdx.y +
(blockIdx.x * blockDim.x * blockDim.y) +
(blockIdx.y * blockDim.x * blockDim.y * gridDim.x);
if (id == 0) {
for (int i = 1; i < length; i++) {
input[i] += input[i - 1];
}
}
}
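// Complex-arithmetic helpers: each routine takes its complex operands as (re, im) pairs
// and writes the result through the x/y output pointers.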
__device__ void mul( double a, double b, double c, double d, double *x, double *y ) {
double re = a * c - b * d;
double im = a * d + b * c;
*x = re;
*y = im;
}
__device__ void add( double a, double b, double c, double d, double *x, double *y ) {
double re = a + c;
double im = b + d;
*x = re;
*y = im;
}
__device__ void pow( double a, double b, double n, double *x, double *y ) {
double re = 1;
double im = 0;
for (int i = 0; i < n; ++i) {
mul(re, im, a, b, &re, &im);
}
*x = re;
*y = im;
}
__device__ void exp( double a, double b, double *x, double *y ) {
double ea = exp(a);
double re = ea * cos(b);
double im = ea * sin(b);
*x = re;
*y = im;
}
//__device__ func f_mul = mul;
//__device__ func f_add = add;
void CalcFractal( GLubyte *devArray, int *counts, double *data, long *histogram, double dPosX, double dPosY, double dScale, int iWidth, int iHeight,
int iDepth, int iIterations, bool bIsJulia, double dJuliaX, double dJuliaY) {
dim3 blockSize;
blockSize.x = 8;
blockSize.y = 8;
dim3 gridSize;
gridSize.x = (iWidth + blockSize.x - 1) / blockSize.x;
gridSize.y = (iHeight + blockSize.y - 1) / blockSize.y;
/*int n = 2;
func *h_function;
func *d_function;
h_function = (func*)malloc(n * iWidth * iHeight * sizeof(func));
hipMalloc((void**) &d_function, n * iWidth * iHeight * sizeof(func));
for (int i = 0; i < iWidth * iHeight; ++i) {
hipMemcpyToSymbol(&h_function[2 * i], f_mul, sizeof(func));
hipMemcpyToSymbol(&h_function[2 * i + 1], f_add, sizeof(func));
}
hipMemcpy(d_function, h_function, n * iWidth * iHeight * sizeof(func), hipMemcpyHostToDevice);
*/
//switch( type ) {
// case FRACTAL_RENDERTYPE_HISTOGRAM:
//set histogram to 0 for all values
/*hipMemset( histogram, 0, (iIterations + 1) * sizeof(long) );
mandelNum <<< gridSize, blockSize >>> ( counts, data, histogram, iWidth, iHeight, dPosX, dPosY, dScale, iIterations, bIsJulia, dJuliaX, dJuliaY );
partialSum <<< gridSize, blockSize >>> ( histogram, iIterations + 1 );
coloring <<< gridSize, blockSize >>> ( counts, data, histogram, devArray, iWidth, iHeight, iDepth, iIterations );*/
// break;
// case FRACTAL_RENDERTYPE_EXPONENTIAL:
//mandelNumExp <<< gridSize, blockSize >>> ( devArray, iWidth,iHeight, iDepth, dPosX, dPosY, dScale,
// iIterations, bIsJulia, dJuliaX, dJuliaY, d_function );
hipLaunchKernelGGL(( mandelNumExp) , dim3(gridSize), dim3(blockSize) , 0, 0, devArray, iWidth,iHeight, iDepth, dPosX, dPosY, dScale,
iIterations, bIsJulia, dJuliaX, dJuliaY );
// break;
//}
//free(h_function);
//hipFree(d_function);
hipDeviceSynchronize();
} | c977ca608a65a936bb5ea40479d8dad07a1e75fc.cu | #include "fractal.h"
#include <cuda_runtime.h>
#include <cuComplex.h>
using namespace std;
/*struct Complex {
double Re;
double Im;
Complex(double re, double im) {
this->Re = re;
this->Im = im;
}
};*/
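// Escape-time iteration for one pixel: store the iteration count and the final z value,
// and bump the iteration histogram (the increment is not atomic, so histogram counts are
// only approximate when this path is used).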
__global__ void mandelNum( int *counts, double *data, long *histogram, const int imgWidth, const int imgHeight, const double midX, const double midY,
const double scale, const int iterations, const bool julia, const double juliaX, const double juliaY ) {
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int i = index_y * imgWidth + index_x;
if( index_x < imgWidth && index_y < imgHeight ) {
int counter = 0;
double a, b, x, y, ax, ay;
if (imgWidth > imgHeight) {
ax = ( ( double )imgWidth ) / imgHeight;
ay = 1;
} else {
ax = 1;
ay = ( ( double )imgHeight ) / imgWidth;
}
a = midX + 2 * ax * scale * ( double )( 2 * index_x - imgWidth ) / imgWidth;
b = midY + 2 * ay * scale * ( double )( 2 * index_y - imgHeight ) / imgHeight;
if( julia ) {
x = juliaX;
y = juliaY;
} else {
x = a;
y = b;
}
double asq = a * a, bsq = b * b;
double atemp;
double r2 = 2 << 16;
//calculate mandelnumber
while( ( asq + bsq < r2 ) && ( counter < iterations ) ) {
atemp = asq - bsq + x;
b = a * b;
b += b + y;
a = atemp;
counter++;
asq = a * a;
bsq = b * b;
}
if (counter != 0)
histogram[counter]++;
counts[i] = counter;
data[2 * i] = a;
data[2 * i + 1] = b;
}
}
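// Histogram-equalization coloring: map each pixel's iteration count through the
// (cumulative) iteration histogram with smooth fractional-iteration interpolation,
// then look the hue up in the color ramp.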
__global__ void coloring( int *counts, double *data, long *histogram, GLubyte *array, const int imgWidth, const int imgHeight, const int depth, const int iterations ) {
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int i = index_y * imgWidth + index_x;
if( index_x < imgWidth && index_y < imgHeight ) {
int total = histogram[iterations];
int j = depth * i;
double hue = (double)histogram[counts[i]];
if( counts[i] < iterations ) {
double hue2 = (double)histogram[counts[i] + 1];
double x = data[2 * i];
double y = data[2 * i + 1];
double zn = x * x + y * y;
//from: COLORING DYNAMICAL SYSTEMSIN THE COMPLEX PLANE
//http://math.unipa.it/~grim/Jbarrallo.PDF
double nu = ( log( log( (double)(2 << 16) ) ) - log( (double)(0.5 * log( zn )) ) ) / log( 2. ); //lg(log(b)) - lg(log(sqrt(zn)))
hue += nu * (hue2 - hue);
}
hue /= total;
//colour scheme
GLubyte colorArray[] = { 0, 0, 0, 255, 0, 0, 255, 155, 0, 255, 255, 255, 0, 0, 0 };
int length = 5;
int n = (int)( hue * ( length - 1 ) );
double h = hue * ( length - 1 ) - n;
GLubyte r1 = colorArray[3 * n];
GLubyte g1 = colorArray[3 * n + 1];
GLubyte b1 = colorArray[3 * n + 2];
GLubyte r2 = colorArray[3 * n + 3];
GLubyte g2 = colorArray[3 * n + 4];
GLubyte b2 = colorArray[3 * n + 5];
GLubyte r = r1 * ( 1 - h ) + r2 * h;
GLubyte g = g1 * ( 1 - h ) + g2 * h;
GLubyte b = b1 * ( 1 - h ) + b2 * h;
array[j] = (GLubyte)r;
array[j + 1] = (GLubyte)g;
array[j + 2] = (GLubyte)b;
}
}
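// Escape-time iteration with exponential smoothing: accumulate exp(-|z|^2) every step
// and map the smoothed value through the color ramp; this is the variant actually
// launched by CalcFractal.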
__global__ void mandelNumExp( GLubyte *array, const int imgWidth, const int imgHeight, const int depth, const double midX, const double midY,
const double scale, const int iterations, const bool julia, const double juliaX, const double juliaY ) {
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int i = index_y * imgWidth + index_x;
if( index_x < imgWidth && index_y < imgHeight ) {
int counter = 0;
double a, b, x, y, ax, ay;
if (imgWidth > imgHeight) {
ax = ( ( double )imgWidth ) / imgHeight;
ay = 1;
} else {
ax = 1;
ay = ( ( double )imgHeight ) / imgWidth;
}
a = midX + 2 * ax * scale * ( double )( 2 * index_x - imgWidth ) / imgWidth;
b = midY + 2 * ay * scale * ( double )( 2 * index_y - imgHeight ) / imgHeight;
if( julia ) {
x = juliaX;
y = juliaY;
} else {
x = a;
y = b;
}
double asq = a * a, bsq = b * b;
double atemp, btemp;
double r = 4;
double e = 0;
//double c = -0.7198;
//double d = 0.9111;
double c = juliaX;
double d = juliaY;
double ctemp, dtemp;
//c = 0;
//d = 0;
//fractal formulas
//http://www.lifesmith.com/formulas.html
//calculate mandelnumber
while( ( asq + bsq < r ) && ( counter < iterations ) ) {
/*atemp = asq - bsq + x + c;
b = a * b;
b += b + y + d;
a = atemp;*/
//mul(a, b, a, b, &a, &b);
//add(a, b, x, y, &a, &b);
//d_function[2 * i](a, b, a, b, &a, &b);
//d_function[2 * i + 1](a, b, x, y, &a, &b);
//f_mul(a, b, a, b, &a, &b);
//f_add(a, b, x, y, &a, &b);
mul(a, b, a, b, &a, &b);
add(a, b, x, y, &a, &b);
add(a, b, c, d, &a, &b);
/*mul(a, b, a, b, &a, &b);
mul(a, b, x, y, &a, &b);
add(a, b, c, d, &a, &b);*/
/*atemp = a * a * a - 3 * a * b * b + a * c - b * d + x;
b = 3 * a * a * b - b * b * b + a * d + b * c + y;
a = atemp;*/
/*mul(a, b, c, d, &ctemp, &dtemp);
//pow(a, b, 3, &a, &b);
mul(a, b, a, b, &atemp, &btemp);
mul(a, b, atemp, btemp, &a, &b);
add(a, b, ctemp, dtemp, &a, &b);
add(a, b, x, y, &a, &b);*/
counter++;
e += expf( - ( asq + bsq ) ); //do not use sqrt as it does not add much
asq = a * a;
bsq = b * b;
}
float hue;
if (counter == iterations) {
hue = 1;
} else {
hue = ( 0.025f * e - (int)(0.025f * e) );
}
GLubyte R, G, B;
//colour scheme
GLubyte colorArray[] = { 0, 0, 0, 255, 0, 0, 255, 155, 0, 255, 255, 255, 0, 0, 0 };
int length = 5;
int n = (int)( hue * ( length - 1 ) );
float h = hue * ( length - 1 ) - n;
GLubyte r1 = colorArray[3 * n];
GLubyte g1 = colorArray[3 * n + 1];
GLubyte b1 = colorArray[3 * n + 2];
GLubyte r2 = colorArray[3 * n + 3];
GLubyte g2 = colorArray[3 * n + 4];
GLubyte b2 = colorArray[3 * n + 5];
R = r1 * ( 1 - h ) + r2 * h;
G = g1 * ( 1 - h ) + g2 * h;
B = b1 * ( 1 - h ) + b2 * h;
/*double H = 1 - hue;
double L = hue;
double S = 1;
double C = (1 - abs( 2 * L - 1 ) ) * S;
double Hprime = 6 * H; //H should be in [0,360), and H' in [0,6), but H is in [0, 1), so we do this instead.
double X = C * (double)(1 - abs( Hprime - 2 * (int)( Hprime / 2 ) - 1 ) ); //C * (1 - |H' mod 2 - 1|)
double m = L - C / 2;
GLubyte bC, bX, b0;
bC = (GLubyte)( ( C + m ) * 255 );
bX = (GLubyte)( ( X + m ) * 255 );
b0 = (GLubyte)( ( 0 + m ) * 255 );
if (Hprime < 1) { R = bC; G = bX; B = b0; }
else if (Hprime < 2) { R = bX; G = bC; B = b0; }
else if (Hprime < 3) { R = b0; G = bC; B = bX; }
else if (Hprime < 4) { R = b0; G = bX; B = bC; }
else if (Hprime < 5) { R = bX; G = b0; B = bX; }
else if (Hprime < 6) { R = bC; G = b0; B = bC; }
else { R = 0; G = 0; B = 0; }*/
int j = depth * i;
array[j] = R;
array[j + 1] = G;
array[j + 2] = B;
}
}
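// In-place serial prefix sum over the iteration histogram, performed by global thread 0 only.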
__global__ void partialSum( long *input, const int length ) {
int id = threadIdx.x + blockDim.x * threadIdx.y +
(blockIdx.x * blockDim.x * blockDim.y) +
(blockIdx.y * blockDim.x * blockDim.y * gridDim.x);
if (id == 0) {
for (int i = 1; i < length; i++) {
input[i] += input[i - 1];
}
}
}
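// Complex-arithmetic helpers: each routine takes its complex operands as (re, im) pairs
// and writes the result through the x/y output pointers.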
__device__ void mul( double a, double b, double c, double d, double *x, double *y ) {
double re = a * c - b * d;
double im = a * d + b * c;
*x = re;
*y = im;
}
__device__ void add( double a, double b, double c, double d, double *x, double *y ) {
double re = a + c;
double im = b + d;
*x = re;
*y = im;
}
__device__ void pow( double a, double b, double n, double *x, double *y ) {
double re = 1;
double im = 0;
for (int i = 0; i < n; ++i) {
mul(re, im, a, b, &re, &im);
}
*x = re;
*y = im;
}
__device__ void exp( double a, double b, double *x, double *y ) {
double ea = exp(a);
double re = ea * cos(b);
double im = ea * sin(b);
*x = re;
*y = im;
}
//__device__ func f_mul = mul;
//__device__ func f_add = add;
void CalcFractal( GLubyte *devArray, int *counts, double *data, long *histogram, double dPosX, double dPosY, double dScale, int iWidth, int iHeight,
int iDepth, int iIterations, bool bIsJulia, double dJuliaX, double dJuliaY) {
dim3 blockSize;
blockSize.x = 8;
blockSize.y = 8;
dim3 gridSize;
gridSize.x = (iWidth + blockSize.x - 1) / blockSize.x;
gridSize.y = (iHeight + blockSize.y - 1) / blockSize.y;
/*int n = 2;
func *h_function;
func *d_function;
h_function = (func*)malloc(n * iWidth * iHeight * sizeof(func));
cudaMalloc((void**) &d_function, n * iWidth * iHeight * sizeof(func));
for (int i = 0; i < iWidth * iHeight; ++i) {
cudaMemcpyToSymbol(&h_function[2 * i], f_mul, sizeof(func));
cudaMemcpyToSymbol(&h_function[2 * i + 1], f_add, sizeof(func));
}
cudaMemcpy(d_function, h_function, n * iWidth * iHeight * sizeof(func), cudaMemcpyHostToDevice);
*/
//switch( type ) {
// case FRACTAL_RENDERTYPE_HISTOGRAM:
//set histogram to 0 for all values
/*cudaMemset( histogram, 0, (iIterations + 1) * sizeof(long) );
mandelNum <<< gridSize, blockSize >>> ( counts, data, histogram, iWidth, iHeight, dPosX, dPosY, dScale, iIterations, bIsJulia, dJuliaX, dJuliaY );
partialSum <<< gridSize, blockSize >>> ( histogram, iIterations + 1 );
coloring <<< gridSize, blockSize >>> ( counts, data, histogram, devArray, iWidth, iHeight, iDepth, iIterations );*/
// break;
// case FRACTAL_RENDERTYPE_EXPONENTIAL:
//mandelNumExp <<< gridSize, blockSize >>> ( devArray, iWidth,iHeight, iDepth, dPosX, dPosY, dScale,
// iIterations, bIsJulia, dJuliaX, dJuliaY, d_function );
mandelNumExp <<< gridSize, blockSize >>> ( devArray, iWidth,iHeight, iDepth, dPosX, dPosY, dScale,
iIterations, bIsJulia, dJuliaX, dJuliaY );
// break;
//}
//free(h_function);
//cudaFree(d_function);
cudaDeviceSynchronize();
} |
17401558d891c32df2579199b9f8350c3fa58331.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
extern "C" void sumMatrixOnGPU2D2(float *MatA, float *MatB, float *MatC, int nx, int ny, int dimx, int dimy);
// grid 2D, block 2D (a grid-1D / block-1D configuration is left commented out inside the launcher)
__global__ void sumMatrixOnGPU2D(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx = iy * nx + ix;
if (ix < nx && iy < ny)
MatC[idx] = MatA[idx] + MatB[idx];
}
void sumMatrixOnGPU2D2(float *MatA, float *MatB, float *MatC, int nx, int ny, int dimx, int dimy)
{
//dim3 block(dimx, 1);
//dim3 grid((nx + block.x - 1) / block.x, 1);
dim3 block(dimx, dimy);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
hipLaunchKernelGGL(( sumMatrixOnGPU2D), dim3(grid), dim3(block), 0, 0, MatA, MatB, MatC, nx, ny);
}
| 17401558d891c32df2579199b9f8350c3fa58331.cu | #include <cuda_runtime.h>
extern "C" void sumMatrixOnGPU2D2(float *MatA, float *MatB, float *MatC, int nx, int ny, int dimx, int dimy);
// grid 2D, block 2D (a grid-1D / block-1D configuration is left commented out inside the launcher)
__global__ void sumMatrixOnGPU2D(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx = iy * nx + ix;
if (ix < nx && iy < ny)
MatC[idx] = MatA[idx] + MatB[idx];
}
void sumMatrixOnGPU2D2(float *MatA, float *MatB, float *MatC, int nx, int ny, int dimx, int dimy)
{
//dim3 block(dimx, 1);
//dim3 grid((nx + block.x - 1) / block.x, 1);
dim3 block(dimx, dimy);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
sumMatrixOnGPU2D<<<grid, block>>>(MatA, MatB, MatC, nx, ny);
}
|
0f49e02589d919e04b440c0436532fbc90572758.hip | // !!! This is a file automatically generated by hipify!!!
#include "config.h"
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
//#define cfd_SUPER_BLOCKS_PER_SM 5
//const int BLOCK_SIZE = 256;
const int cfd_nBlksPerCluster = 16;
const int cfd_nAtom = BLOCK_SIZE * MSIZE;
const int cfd_maxNeighbors = 8;
texture<float,1,hipReadModeElementType> tex_mx;
texture<float,1,hipReadModeElementType> tex_my;
texture<float,1,hipReadModeElementType> tex_mz;
texture<float,1,hipReadModeElementType> tex_energy;
texture<float,1,hipReadModeElementType> tex_density;
texture<int,1,hipReadModeElementType> tex_neighbor;
texture<float,1,hipReadModeElementType> tex_normals;
inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom,
int* neighborList, int blockSz)
{
//create non-uniform data sharing,
//but randomize the block order so that tasks sharing the same data do not end up as neighboring tasks
vector<int> atomInds(nAtom);
vector<int> blkInds((nAtom+blockSz-1)/blockSz);
for(int i=0; i<blkInds.size(); ++i)
blkInds[i] = i;
random_shuffle(blkInds.begin(), blkInds.end());
int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int));
for(int i=0; i<blkInds.size(); ++i)
blkOrder[i] = blkInds[i];
int j=0;
for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it)
{
int blkInd = *it;
for(int i=0; i<blockSz; ++i)
atomInds[j++] = blkInd*blockSz + i;
}
int superBlockSz = blockSz * cfd_nBlksPerCluster;
// Build Neighbor List
for (int i = 0; i < nAtom; i++)
{
int start = i - i%superBlockSz; //difference is here
//int end = i + (superBlockSz - i%superBlockSz)-1;
int nNeighbors = 0;
do {
int j = start + rand() % superBlockSz;
if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor
neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j];
nNeighbors ++;
} while(nNeighbors<cfd_maxNeighbors);
}
return blkOrder;
}
#define GAMMA 1.4f
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define NDIM 3
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
__host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
__host__ __device__ inline float compute_speed_sqd(float3& velocity)
{
return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
__host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd);
}
__host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
return sqrtf(float(GAMMA)*pressure/density);
}
__host__ __device__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
fc_momentum_x.x = velocity.x*momentum.x + pressure;
fc_momentum_x.y = velocity.x*momentum.y;
fc_momentum_x.z = velocity.x*momentum.z;
fc_momentum_y.x = fc_momentum_x.y;
fc_momentum_y.y = velocity.y*momentum.y + pressure;
fc_momentum_y.z = velocity.y*momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z*momentum.z + pressure;
float de_p = density_energy+pressure;
fc_density_energy.x = velocity.x*de_p;
fc_density_energy.y = velocity.y*de_p;
fc_density_energy.z = velocity.z*de_p;
}
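// Host-side reference computation of the per-cell fluxes; compares against the GPU
// results and reports the first cell that differs by more than the tolerance.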
void check_cfd(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes)
{
const float smoothing_coefficient = float(0.2f);
//const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int i=0;i<MSIZE*BLOCK_SIZE;i++){
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = mx[i];
momentum_i.y = my[i];
momentum_i.z = mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = my[nb];
momentum_nb.z = mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
/*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\
((pow((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/
if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\
((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01)))
{printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\
fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\
fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\
fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\
fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy);
return;}
}
printf("GOOD! passed!\n");
return;
}
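// GPU flux kernel: one thread per cell. Reads the cell's own state and its
// cfd_maxNeighbors neighbors (density energy, neighbor indices and face normals come
// from texture fetches) and accumulates the artificial-viscosity and cell-centered
// flux contributions into the fluxes array.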
__global__ void cfd_kernel(int nelr,int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my,const float* __restrict__ mz, float* density_energy, float* fluxes,int *d_flag)
{
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = mx[i];
momentum_i.y = my[i];
momentum_i.z = mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = tex1Dfetch(tex_energy,i);//density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = tex1Dfetch(tex_neighbor,i+j*nelr);//elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = tex1Dfetch(tex_normals,i + (j + 0*cfd_maxNeighbors)*nelr);//normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = tex1Dfetch(tex_normals,i + (j + 1*cfd_maxNeighbors)*nelr);//normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = tex1Dfetch(tex_normals,i + (j + 2*cfd_maxNeighbors)*nelr);//normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = my[nb];
momentum_nb.z = mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = tex1Dfetch(tex_energy,nb);//density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
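// write the accumulated fluxes back in structure-of-arrays layout (one nelr-stride plane per conserved variable)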
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
//if (threadIdx.x==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
srand(2013);
// Allocate problem data on host
//posVecType* position;
//forceVecType* force;
float *density;
float *mx;
float *my;
float *mz;
float *density_energy;
float *normals;
float *fluxes;
int* cfd_neighborList;
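// pinned (page-locked) host buffers so the host<->device copies below run faster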
hipHostMalloc((void**)&density, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&mx, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&my, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&mz, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&density_energy, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
hipHostMalloc((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float));
hipHostMalloc((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
// Allocate device memory for position and force
//forceVecType* d_force;
//posVecType* d_position;
float *d_density;
float *d_mx;
float *d_my;
float *d_mz;
float *d_density_energy;
float *d_normals;
float *d_fluxes;
hipMalloc((void**)&d_density, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_mx, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_my, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_mz, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
hipMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float));
hipMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float));
//hipMemset(d_force, 0, cfd_nAtom*sizeof(forceVecType));
// Allocate device memory for neighbor list
int* d_cfd_neighborList;
hipMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
//cout << "Initializing test problem (this can take several "
// "minutes for large problems)\n";
// Initialize positions -- random distribution in cubic domain
// domainEdge constant specifies edge length
for (int i = 0; i < cfd_nAtom; i++)
{
density[i] = (float)(drand48());
density_energy[i] = (float)(drand48() );
mx[i] = (float)(drand48() );
my[i] = (float)(drand48() );
mz[i] = (float)(drand48() );
/*
density[i] = 1.1+i*0.01;
density_energy[i] = 1.1+i*0.01;
mx[i] = 1.1+i*0.01;
my[i] = 1.1+i*0.01;
mz[i] = 1.1+i*0.01;
*/
}
for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i)
normals[i] = (float)(drand48());
cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, BLOCK_SIZE);
hipMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), hipMemcpyHostToDevice);
// Copy data to GPU
hipMemcpy(d_density, density, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_my, my, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), hipMemcpyHostToDevice);
hipSetDeviceFlags(hipDeviceMapHost);
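// zero-copy flag: allocate it in mapped host memory and fetch its device-side pointer; only the commented-out atomicAdd in the kernel uses it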
int *flag_cfd,*d_flag_cfd;
hipHostMalloc((void**)&flag_cfd,sizeof( int),hipHostMallocMapped);
hipHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0);
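// bind the read-only inputs to 1D textures; the kernel fetches the energy, neighbor list and normals through the texture cache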
hipBindTexture(0,tex_mx,d_mx,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_my,d_my,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_mz,d_mz,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_energy,d_density_energy,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_density,d_density,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_neighbor,d_cfd_neighborList,cfd_maxNeighbors*cfd_nAtom*sizeof(int));
hipBindTexture(0,tex_normals,d_normals,cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
int cfd_gridSize = (cfd_nAtom-1+BLOCK_SIZE) / BLOCK_SIZE;
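// warm-up: launch the kernel a few times before timing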
for(int i = 0; i <5; i++)
{
hipLaunchKernelGGL(( cfd_kernel), dim3(cfd_gridSize), dim3(BLOCK_SIZE), 0, 0, cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes,d_flag_cfd);
}
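// timed runs: average the kernel time over ITERATIONS launches using events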
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
for(int i = 0; i <ITERATIONS; i++)
{
hipLaunchKernelGGL(( cfd_kernel), dim3(cfd_gridSize), dim3(BLOCK_SIZE), 0, 0, cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes,d_flag_cfd);
}
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time/ITERATIONS << endl;
hipMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), hipMemcpyDeviceToHost);
//check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes);
//TODO:verified on small inputs
/*
ifstream fluxesF("../org/fluxes.txt");
for(int i=0; i<cfd_nAtom*NVAR; ++i) {
float f;
fluxesF >> f;
if(abs(f - fluxes[i]) > 0.001) {
fprintf(stderr, "Test failed! i = %d\n", i);
return 1;
}
}*/
// printf("Test passed!\n");
// fluxesF.close();
return 0;
}
| 0f49e02589d919e04b440c0436532fbc90572758.cu | #include "config.h"
#include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
//#define cfd_SUPER_BLOCKS_PER_SM 5
//const int BLOCK_SIZE = 256;
const int cfd_nBlksPerCluster = 16;
const int cfd_nAtom = BLOCK_SIZE * MSIZE;
const int cfd_maxNeighbors = 8;
texture<float,1,cudaReadModeElementType> tex_mx;
texture<float,1,cudaReadModeElementType> tex_my;
texture<float,1,cudaReadModeElementType> tex_mz;
texture<float,1,cudaReadModeElementType> tex_energy;
texture<float,1,cudaReadModeElementType> tex_density;
texture<int,1,cudaReadModeElementType> tex_neighbor;
texture<float,1,cudaReadModeElementType> tex_normals;
inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom,
int* neighborList, int blockSz)
{
//create non-uniform data sharing
//but avoid that tasks sharing the same data are neighbor tasks by randomization
vector<int> atomInds(nAtom);
vector<int> blkInds((nAtom+blockSz-1)/blockSz);
for(int i=0; i<blkInds.size(); ++i)
blkInds[i] = i;
random_shuffle(blkInds.begin(), blkInds.end());
int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int));
for(int i=0; i<blkInds.size(); ++i)
blkOrder[i] = blkInds[i];
int j=0;
for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it)
{
int blkInd = *it;
for(int i=0; i<blockSz; ++i)
atomInds[j++] = blkInd*blockSz + i;
}
int superBlockSz = blockSz * cfd_nBlksPerCluster;
// Build Neighbor List
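// each atom gets cfd_maxNeighbors neighbors drawn at random from its own super-block of cfd_nBlksPerCluster thread blocks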
for (int i = 0; i < nAtom; i++)
{
int start = i - i%superBlockSz; //difference is here: neighbor candidates come only from this atom's super-block
//int end = i + (superBlockSz - i%superBlockSz)-1;
int nNeighbors = 0;
do {
int j = start + rand() % superBlockSz;
if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor
neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j];
nNeighbors ++;
} while(nNeighbors<cfd_maxNeighbors);
}
return blkOrder;
}
#define GAMMA 1.4f
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define NDIM 3
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
__host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
__host__ __device__ inline float compute_speed_sqd(float3& velocity)
{
return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
__host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd);
}
__host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
return sqrtf(float(GAMMA)*pressure/density);
}
__host__ __device__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
fc_momentum_x.x = velocity.x*momentum.x + pressure;
fc_momentum_x.y = velocity.x*momentum.y;
fc_momentum_x.z = velocity.x*momentum.z;
fc_momentum_y.x = fc_momentum_x.y;
fc_momentum_y.y = velocity.y*momentum.y + pressure;
fc_momentum_y.z = velocity.y*momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z*momentum.z + pressure;
float de_p = density_energy+pressure;
fc_density_energy.x = velocity.x*de_p;
fc_density_energy.y = velocity.y*de_p;
fc_density_energy.z = velocity.z*de_p;
}
void check_cfd(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes)
{
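// host-side reference check: recompute the fluxes on the CPU and compare them element by element against the GPU results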
const float smoothing_coefficient = float(0.2f);
//const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int i=0;i<MSIZE*BLOCK_SIZE;i++){
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = mx[i];
momentum_i.y = my[i];
momentum_i.z = mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = my[nb];
momentum_nb.z = mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
/*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\
((pow((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/
if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\
((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01)))
{printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\
fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\
fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\
fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\
fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy);
return;}
}
printf("GOOD! passed!\n");
return;
}
__global__ void cfd_kernel(int nelr,int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my,const float* __restrict__ mz, float* density_energy, float* fluxes,int *d_flag)
{
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = mx[i];
momentum_i.y = my[i];
momentum_i.z = mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = tex1Dfetch(tex_energy,i);//density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = tex1Dfetch(tex_neighbor,i+j*nelr);//elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = tex1Dfetch(tex_normals,i + (j + 0*cfd_maxNeighbors)*nelr);//normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = tex1Dfetch(tex_normals,i + (j + 1*cfd_maxNeighbors)*nelr);//normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = tex1Dfetch(tex_normals,i + (j + 2*cfd_maxNeighbors)*nelr);//normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = my[nb];
momentum_nb.z = mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = tex1Dfetch(tex_energy,nb);//density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
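// write the accumulated fluxes back in structure-of-arrays layout (one nelr-stride plane per conserved variable)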
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
//if (threadIdx.x==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
srand(2013);
// Allocate problem data on host
//posVecType* position;
//forceVecType* force;
float *density;
float *mx;
float *my;
float *mz;
float *density_energy;
float *normals;
float *fluxes;
int* cfd_neighborList;
cudaMallocHost((void**)&density, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&mx, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&my, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&mz, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&density_energy, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
cudaMallocHost((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float));
cudaMallocHost((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
// Allocate device memory for position and force
//forceVecType* d_force;
//posVecType* d_position;
float *d_density;
float *d_mx;
float *d_my;
float *d_mz;
float *d_density_energy;
float *d_normals;
float *d_fluxes;
cudaMalloc((void**)&d_density, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_mx, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_my, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_mz, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
cudaMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float));
cudaMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float));
//cudaMemset(d_force, 0, cfd_nAtom*sizeof(forceVecType));
// Allocate device memory for neighbor list
int* d_cfd_neighborList;
cudaMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
//cout << "Initializing test problem (this can take several "
// "minutes for large problems)\n";
// Initialize positions -- random distribution in cubic domain
// domainEdge constant specifies edge length
for (int i = 0; i < cfd_nAtom; i++)
{
density[i] = (float)(drand48());
density_energy[i] = (float)(drand48() );
mx[i] = (float)(drand48() );
my[i] = (float)(drand48() );
mz[i] = (float)(drand48() );
/*
density[i] = 1.1+i*0.01;
density_energy[i] = 1.1+i*0.01;
mx[i] = 1.1+i*0.01;
my[i] = 1.1+i*0.01;
mz[i] = 1.1+i*0.01;
*/
}
for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i)
normals[i] = (float)(drand48());
cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, BLOCK_SIZE);
cudaMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), cudaMemcpyHostToDevice);
// Copy data to GPU
cudaMemcpy(d_density, density, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_my, my, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), cudaMemcpyHostToDevice);
cudaSetDeviceFlags(cudaDeviceMapHost);
int *flag_cfd,*d_flag_cfd;
cudaHostAlloc((void**)&flag_cfd,sizeof( int),cudaHostAllocMapped);
cudaHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0);
cudaBindTexture(0,tex_mx,d_mx,cfd_nAtom*sizeof(float));
cudaBindTexture(0,tex_my,d_my,cfd_nAtom*sizeof(float));
cudaBindTexture(0,tex_mz,d_mz,cfd_nAtom*sizeof(float));
cudaBindTexture(0,tex_energy,d_density_energy,cfd_nAtom*sizeof(float));
cudaBindTexture(0,tex_density,d_density,cfd_nAtom*sizeof(float));
cudaBindTexture(0,tex_neighbor,d_cfd_neighborList,cfd_maxNeighbors*cfd_nAtom*sizeof(int));
cudaBindTexture(0,tex_normals,d_normals,cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
int cfd_gridSize = (cfd_nAtom-1+BLOCK_SIZE) / BLOCK_SIZE;
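// warm-up: launch the kernel a few times before timing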
for(int i = 0; i <5; i++)
{
cfd_kernel<<<cfd_gridSize, BLOCK_SIZE>>>(cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes,d_flag_cfd);
}
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
float kernel_time = 0.0f;
cudaEventRecord(kernel_start, 0);
for(int i = 0; i <ITERATIONS; i++)
{
cfd_kernel<<<cfd_gridSize, BLOCK_SIZE>>>(cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes,d_flag_cfd);
}
cudaDeviceSynchronize();
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time/ITERATIONS << endl;
cudaMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), cudaMemcpyDeviceToHost);
//check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes);
//TODO:verified on small inputs
/*
ifstream fluxesF("../org/fluxes.txt");
for(int i=0; i<cfd_nAtom*NVAR; ++i) {
float f;
fluxesF >> f;
if(abs(f - fluxes[i]) > 0.001) {
fprintf(stderr, "Test failed! i = %d\n", i);
return 1;
}
}*/
// printf("Test passed!\n");
// fluxesF.close();
return 0;
}
|
a4d2d845233cdce95f5892cceecbb1db84d2680f.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
typedef enum{
ADAM_MODE_0 =0, // L2 regularization mode
ADAM_MODE_1 =1 // Decoupled weight decay mode(AdamW)
} adamMode_t;
using MATH_T = float;
template<typename T>
struct AdamFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<4>& tl,
const float beta1,
const float beta2,
const float beta1_correction,
const float beta2_correction,
const float epsilon,
const float lr,
adamMode_t mode,
const float decay)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
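// each thread block processes one chunk of one tensor; block_to_tensor/block_to_chunk map blockIdx.x to that (tensor, chunk) pair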
int tensor_loc = tl.block_to_tensor[blockIdx.x];
// potentially use to pass in list of scalar
// int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
T* g = (T*)tl.addresses[0][tensor_loc];
g += chunk_idx*chunk_size;
T* p = (T*)tl.addresses[1][tensor_loc];
p += chunk_idx*chunk_size;
T* m = (T*)tl.addresses[2][tensor_loc];
m += chunk_idx*chunk_size;
T* v = (T*)tl.addresses[3][tensor_loc];
v += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
// see note in multi_tensor_scale_kernel.cu
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
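// ILP-wide register blocking: load ILP strided elements per thread, update them in registers, then write them back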
MATH_T r_g[ILP];
MATH_T r_p[ILP];
MATH_T r_m[ILP];
MATH_T r_v[ILP];
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
r_g[ii] = g[i];
r_p[ii] = p[i];
r_m[ii] = m[i];
r_v[ii] = v[i];
} else {
r_g[ii] = MATH_T(0);
r_p[ii] = MATH_T(0);
r_m[ii] = MATH_T(0);
r_v[ii] = MATH_T(0);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
if(mode == ADAM_MODE_0) { // L2
r_g[ii] = r_g[ii] + (decay * r_p[ii]);
r_m[ii] = beta1 * r_m[ii] + (1-beta1) * r_g[ii];
r_v[ii] = beta2 * r_v[ii] + (1-beta2) * r_g[ii] * r_g[ii];
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
MATH_T update = next_m_unbiased / denom;
r_p[ii] = r_p[ii] - (lr * update);
}
else { // weight decay
r_m[ii] = beta1 * r_m[ii] + (1-beta1) * r_g[ii];
r_v[ii] = beta2 * r_v[ii] + (1-beta2) * r_g[ii] * r_g[ii];
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]);
r_p[ii] = r_p[ii] - (lr * update);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
p[i] = r_p[ii];
m[i] = r_m[ii];
v[i] = r_v[ii];
}
}
}
}
};
template<typename T>
struct AdamCapturableFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<4>& tl,
const float beta1,
const float beta2,
const int* step,
const int bias_correction,
const float epsilon,
const float* lr,
adamMode_t mode,
const float decay,
const float* inv_scale)
{
if(*noop_gmem == 1)
return;
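// step, lr and inv_scale arrive through device pointers rather than by value, so the update can run under graph capture without host synchronization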
float beta1_correction = 1.0f, beta2_correction = 1.0f;
if (bias_correction == 1) {
beta1_correction = 1 - pow(beta1, *step);
beta2_correction = 1 - pow(beta2, *step);
}
int tensor_loc = tl.block_to_tensor[blockIdx.x];
// potentially use to pass in list of scalar
// int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
T* g = (T*)tl.addresses[0][tensor_loc];
g += chunk_idx*chunk_size;
T* p = (T*)tl.addresses[1][tensor_loc];
p += chunk_idx*chunk_size;
T* m = (T*)tl.addresses[2][tensor_loc];
m += chunk_idx*chunk_size;
T* v = (T*)tl.addresses[3][tensor_loc];
v += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
// see note in multi_tensor_scale_kernel.cu
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
MATH_T r_g[ILP];
MATH_T r_p[ILP];
MATH_T r_m[ILP];
MATH_T r_v[ILP];
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
g[i] = g[i] * (*inv_scale);
r_g[ii] = g[i];
r_p[ii] = p[i];
r_m[ii] = m[i];
r_v[ii] = v[i];
} else {
r_g[ii] = MATH_T(0);
r_p[ii] = MATH_T(0);
r_m[ii] = MATH_T(0);
r_v[ii] = MATH_T(0);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
if(mode == ADAM_MODE_0) { // L2
r_g[ii] = r_g[ii] + (decay * r_p[ii]);
r_m[ii] = beta1 * r_m[ii] + (1-beta1) * r_g[ii];
r_v[ii] = beta2 * r_v[ii] + (1-beta2) * r_g[ii] * r_g[ii];
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
MATH_T update = next_m_unbiased / denom;
r_p[ii] = r_p[ii] - (*lr * update);
}
else { // weight decay
r_m[ii] = beta1 * r_m[ii] + (1-beta1) * r_g[ii];
r_v[ii] = beta2 * r_v[ii] + (1-beta2) * r_g[ii] * r_g[ii];
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]);
r_p[ii] = r_p[ii] - (*lr * update);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
p[i] = r_p[ii];
m[i] = r_m[ii];
v[i] = r_v[ii];
}
}
}
}
};
void multi_tensor_adam_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
const float lr,
const float beta1,
const float beta2,
const float epsilon,
const int step,
const int mode,
const int bias_correction,
const float weight_decay)
{
using namespace at;
// Handle bias correction mode
float bias_correction1 = 1.0f, bias_correction2 = 1.0f;
if (bias_correction == 1) {
bias_correction1 = 1 - ::pow(beta1, step);
bias_correction2 = 1 - ::pow(beta2, step);
}
// Assume single type across p,g,m1,m2 now
DISPATCH_DOUBLE_FLOAT_HALF_AND_BFLOAT(
tensor_lists[0][0].scalar_type(), 0, "adam",
multi_tensor_apply<4>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
AdamFunctor<scalar_t_0>(),
beta1,
beta2,
bias_correction1,
bias_correction2,
epsilon,
lr,
(adamMode_t) mode,
weight_decay); )
AT_CUDA_CHECK(hipGetLastError());
}
void multi_tensor_adam_capturable_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::Tensor lr,
const float beta1,
const float beta2,
const float epsilon,
at::Tensor step,
const int mode,
const int bias_correction,
const float weight_decay,
at::Tensor inv_scale)
{
using namespace at;
DISPATCH_DOUBLE_FLOAT_HALF_AND_BFLOAT(
tensor_lists[0][0].scalar_type(), 0, "adam",
multi_tensor_apply<4>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
AdamCapturableFunctor<scalar_t_0>(),
beta1,
beta2,
step.data_ptr<int>(),
bias_correction,
epsilon,
lr.data_ptr<float>(),
(adamMode_t) mode,
weight_decay,
inv_scale.data_ptr<float>()); )
AT_CUDA_CHECK(hipGetLastError());
}
| a4d2d845233cdce95f5892cceecbb1db84d2680f.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
typedef enum{
ADAM_MODE_0 =0, // L2 regularization mode
ADAM_MODE_1 =1 // Decoupled weight decay mode(AdamW)
} adamMode_t;
using MATH_T = float;
template<typename T>
struct AdamFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<4>& tl,
const float beta1,
const float beta2,
const float beta1_correction,
const float beta2_correction,
const float epsilon,
const float lr,
adamMode_t mode,
const float decay)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
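// each thread block processes one chunk of one tensor; block_to_tensor/block_to_chunk map blockIdx.x to that (tensor, chunk) pair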
int tensor_loc = tl.block_to_tensor[blockIdx.x];
// potentially use to pass in list of scalar
// int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
T* g = (T*)tl.addresses[0][tensor_loc];
g += chunk_idx*chunk_size;
T* p = (T*)tl.addresses[1][tensor_loc];
p += chunk_idx*chunk_size;
T* m = (T*)tl.addresses[2][tensor_loc];
m += chunk_idx*chunk_size;
T* v = (T*)tl.addresses[3][tensor_loc];
v += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
// see note in multi_tensor_scale_kernel.cu
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
MATH_T r_g[ILP];
MATH_T r_p[ILP];
MATH_T r_m[ILP];
MATH_T r_v[ILP];
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
r_g[ii] = g[i];
r_p[ii] = p[i];
r_m[ii] = m[i];
r_v[ii] = v[i];
} else {
r_g[ii] = MATH_T(0);
r_p[ii] = MATH_T(0);
r_m[ii] = MATH_T(0);
r_v[ii] = MATH_T(0);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
if(mode == ADAM_MODE_0) { // L2
r_g[ii] = r_g[ii] + (decay * r_p[ii]);
r_m[ii] = beta1 * r_m[ii] + (1-beta1) * r_g[ii];
r_v[ii] = beta2 * r_v[ii] + (1-beta2) * r_g[ii] * r_g[ii];
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
MATH_T update = next_m_unbiased / denom;
r_p[ii] = r_p[ii] - (lr * update);
}
else { // weight decay
r_m[ii] = beta1 * r_m[ii] + (1-beta1) * r_g[ii];
r_v[ii] = beta2 * r_v[ii] + (1-beta2) * r_g[ii] * r_g[ii];
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]);
r_p[ii] = r_p[ii] - (lr * update);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
p[i] = r_p[ii];
m[i] = r_m[ii];
v[i] = r_v[ii];
}
}
}
}
};
template<typename T>
struct AdamCapturableFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<4>& tl,
const float beta1,
const float beta2,
const int* step,
const int bias_correction,
const float epsilon,
const float* lr,
adamMode_t mode,
const float decay,
const float* inv_scale)
{
if(*noop_gmem == 1)
return;
float beta1_correction = 1.0f, beta2_correction = 1.0f;
if (bias_correction == 1) {
beta1_correction = 1 - pow(beta1, *step);
beta2_correction = 1 - pow(beta2, *step);
}
int tensor_loc = tl.block_to_tensor[blockIdx.x];
// potentially use to pass in list of scalar
// int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
T* g = (T*)tl.addresses[0][tensor_loc];
g += chunk_idx*chunk_size;
T* p = (T*)tl.addresses[1][tensor_loc];
p += chunk_idx*chunk_size;
T* m = (T*)tl.addresses[2][tensor_loc];
m += chunk_idx*chunk_size;
T* v = (T*)tl.addresses[3][tensor_loc];
v += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
// see note in multi_tensor_scale_kernel.cu
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
MATH_T r_g[ILP];
MATH_T r_p[ILP];
MATH_T r_m[ILP];
MATH_T r_v[ILP];
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
g[i] = g[i] * (*inv_scale);
r_g[ii] = g[i];
r_p[ii] = p[i];
r_m[ii] = m[i];
r_v[ii] = v[i];
} else {
r_g[ii] = MATH_T(0);
r_p[ii] = MATH_T(0);
r_m[ii] = MATH_T(0);
r_v[ii] = MATH_T(0);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
if(mode == ADAM_MODE_0) { // L2
r_g[ii] = r_g[ii] + (decay * r_p[ii]);
r_m[ii] = beta1 * r_m[ii] + (1-beta1) * r_g[ii];
r_v[ii] = beta2 * r_v[ii] + (1-beta2) * r_g[ii] * r_g[ii];
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
MATH_T update = next_m_unbiased / denom;
r_p[ii] = r_p[ii] - (*lr * update);
}
else { // weight decay
r_m[ii] = beta1 * r_m[ii] + (1-beta1) * r_g[ii];
r_v[ii] = beta2 * r_v[ii] + (1-beta2) * r_g[ii] * r_g[ii];
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]);
r_p[ii] = r_p[ii] - (*lr * update);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
p[i] = r_p[ii];
m[i] = r_m[ii];
v[i] = r_v[ii];
}
}
}
}
};
void multi_tensor_adam_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
const float lr,
const float beta1,
const float beta2,
const float epsilon,
const int step,
const int mode,
const int bias_correction,
const float weight_decay)
{
using namespace at;
// Handle bias correction mode
float bias_correction1 = 1.0f, bias_correction2 = 1.0f;
if (bias_correction == 1) {
bias_correction1 = 1 - std::pow(beta1, step);
bias_correction2 = 1 - std::pow(beta2, step);
}
// Assume single type across p,g,m1,m2 now
DISPATCH_DOUBLE_FLOAT_HALF_AND_BFLOAT(
tensor_lists[0][0].scalar_type(), 0, "adam",
multi_tensor_apply<4>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
AdamFunctor<scalar_t_0>(),
beta1,
beta2,
bias_correction1,
bias_correction2,
epsilon,
lr,
(adamMode_t) mode,
weight_decay); )
AT_CUDA_CHECK(cudaGetLastError());
}
void multi_tensor_adam_capturable_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::Tensor lr,
const float beta1,
const float beta2,
const float epsilon,
at::Tensor step,
const int mode,
const int bias_correction,
const float weight_decay,
at::Tensor inv_scale)
{
using namespace at;
DISPATCH_DOUBLE_FLOAT_HALF_AND_BFLOAT(
tensor_lists[0][0].scalar_type(), 0, "adam",
multi_tensor_apply<4>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
AdamCapturableFunctor<scalar_t_0>(),
beta1,
beta2,
step.data_ptr<int>(),
bias_correction,
epsilon,
lr.data_ptr<float>(),
(adamMode_t) mode,
weight_decay,
inv_scale.data_ptr<float>()); )
AT_CUDA_CHECK(cudaGetLastError());
}
|
8619ecd5ed75a6496500e73a5645e91fcff615dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
double rtclock()
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
static PPMImage *readPPM(const char *filename) {
char buff[16];
PPMImage *img;
FILE *fp;
int c, rgb_comp_color;
fp = fopen(filename, "rb");
if (!fp) {
fprintf(stderr, "Unable to open file '%s'\n", filename);
exit(1);
}
if (!fgets(buff, sizeof(buff), fp)) {
perror(filename);
exit(1);
}
if (buff[0] != 'P' || buff[1] != '6') {
fprintf(stderr, "Invalid image format (must be 'P6')\n");
exit(1);
}
img = (PPMImage *) malloc(sizeof(PPMImage));
if (!img) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
c = getc(fp);
while (c == '#') {
while (getc(fp) != '\n')
;
c = getc(fp);
}
ungetc(c, fp);
if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
exit(1);
}
if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
fprintf(stderr, "Invalid rgb component (error loading '%s')\n",
filename);
exit(1);
}
if (rgb_comp_color != RGB_COMPONENT_COLOR) {
fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
exit(1);
}
while (fgetc(fp) != '\n')
;
img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
if (!img->data) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
fprintf(stderr, "Error loading image '%s'\n", filename);
exit(1);
}
fclose(fp);
return img;
}
int n_block = 512;
int n_thread_block = 512;
__global__
void histogram_kernel(PPMPixel *data, float n, float *hist) {
__shared__ float hist_private[64];
if (threadIdx.x < 64)
hist_private[threadIdx.x] = 0; //initialize the per-block private histogram
__syncthreads();
int i, j, k, l, x, count;
count = 0;
x = 0;
int begin = threadIdx.x + blockIdx.x * blockDim.x; //global index where this thread starts scanning
int stride = blockDim.x * gridDim.x; // stride is total number of threads
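// for each of the 64 (r,g,b) bins selected by the triple loop below, grid-stride over the image, count matching pixels, and atomically add the normalized count to the block-private bin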
for (j = 0; j <= 3; j++) {
for (k = 0; k <= 3; k++) {
for (l = 0; l <= 3; l++) {
for (i = begin; i < n; i += stride ) {
if (data[i].red == j && data[i].green == k && data[i].blue == l)
count++;
}
//printf("Bd: %d Bi: %03d Ti: %03d st: %d h: %.6f\n", blockDim.x, blockIdx.x, threadIdx.x, stride, ((float) count)/n);
atomicAdd(hist_private + x, ((float) count)/n);
count = 0;
x++;
}
}
}
__syncthreads();
if (threadIdx.x < 64)
atomicAdd(hist + threadIdx.x, (float) hist_private[threadIdx.x] ); //merge the per-block histograms into the global one
}
void Histogram(PPMImage *image, float *h) {
float n = image->y * image->x;
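// quantize each 8-bit channel down to 2 bits (values 0..3), giving 4*4*4 = 64 histogram bins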
for (int i = 0; i < n; i++) {
image->data[i].red = floor((image->data[i].red * 4) / 256);
image->data[i].blue = floor((image->data[i].blue * 4) / 256);
image->data[i].green = floor((image->data[i].green * 4) / 256);
}
//Allocate GPU memory and copy the inputs to the device******************
PPMPixel *d_data;
float *d_h;
int size_hist = 64 * sizeof(float);
hipMalloc((void **)&d_data, n * sizeof(PPMPixel));
hipMalloc((void **)&d_h, size_hist);
hipMemcpy(d_data, image->data, n * sizeof(PPMPixel), hipMemcpyHostToDevice);
//******************************************************************
hipLaunchKernelGGL(( histogram_kernel) , dim3(n_block), dim3(n_thread_block) , 0, 0, d_data, n, d_h);
//hipDeviceSynchronize();
//Copy the result back from device to host
hipMemcpy(h, d_h, size_hist, hipMemcpyDeviceToHost);
//Free the allocated device memory
hipFree(d_data);
hipFree(d_h);
}
int main(int argc, char *argv[]) {
if( argc != 2 ) {
printf("Expected exactly one argument: the input PPM file.\n");
return 1;
}
double t_start, t_end;
int i;
char *filename = argv[1]; //input PPM file name
//scanf("%s", filename);
PPMImage *image = readPPM(filename);
float *h = (float*)malloc(sizeof(float) * 64);
//Initialize the histogram to zero
for(i=0; i < 64; i++) h[i] = 0.0;
t_start = rtclock();
Histogram(image, h);
t_end = rtclock();
for (i = 0; i < 64; i++){
printf("%0.3f ", h[i]);
}
printf("\n");
fprintf(stdout, "\n%0.6lfs\n", t_end - t_start);
free(h);
}
| 8619ecd5ed75a6496500e73a5645e91fcff615dd.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
double rtclock()
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
static PPMImage *readPPM(const char *filename) {
char buff[16];
PPMImage *img;
FILE *fp;
int c, rgb_comp_color;
fp = fopen(filename, "rb");
if (!fp) {
fprintf(stderr, "Unable to open file '%s'\n", filename);
exit(1);
}
if (!fgets(buff, sizeof(buff), fp)) {
perror(filename);
exit(1);
}
if (buff[0] != 'P' || buff[1] != '6') {
fprintf(stderr, "Invalid image format (must be 'P6')\n");
exit(1);
}
img = (PPMImage *) malloc(sizeof(PPMImage));
if (!img) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
c = getc(fp);
while (c == '#') {
while (getc(fp) != '\n')
;
c = getc(fp);
}
ungetc(c, fp);
if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
exit(1);
}
if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
fprintf(stderr, "Invalid rgb component (error loading '%s')\n",
filename);
exit(1);
}
if (rgb_comp_color != RGB_COMPONENT_COLOR) {
fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
exit(1);
}
while (fgetc(fp) != '\n')
;
img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
if (!img->data) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
fprintf(stderr, "Error loading image '%s'\n", filename);
exit(1);
}
fclose(fp);
return img;
}
int n_block = 512;
int n_thread_block = 512;
__global__
void histogram_kernel(PPMPixel *data, float n, float *hist) {
__shared__ float hist_private[64];
if (threadIdx.x < 64)
hist_private[threadIdx.x] = 0; //initialize the per-block private histogram
__syncthreads();
int i, j, k, l, x, count;
count = 0;
x = 0;
int begin = threadIdx.x + blockIdx.x * blockDim.x; //global index where this thread starts scanning
int stride = blockDim.x * gridDim.x; // stride is total number of threads
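// for each of the 64 (r,g,b) bins selected by the triple loop below, grid-stride over the image, count matching pixels, and atomically add the normalized count to the block-private bin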
for (j = 0; j <= 3; j++) {
for (k = 0; k <= 3; k++) {
for (l = 0; l <= 3; l++) {
for (i = begin; i < n; i += stride ) {
if (data[i].red == j && data[i].green == k && data[i].blue == l)
count++;
}
//printf("Bd: %d Bi: %03d Ti: %03d st: %d h: %.6f\n", blockDim.x, blockIdx.x, threadIdx.x, stride, ((float) count)/n);
atomicAdd(hist_private + x, ((float) count)/n);
count = 0;
x++;
}
}
}
__syncthreads();
if (threadIdx.x < 64)
atomicAdd(hist + threadIdx.x, (float) hist_private[threadIdx.x] ); //merge the per-block histograms into the global one
}
void Histogram(PPMImage *image, float *h) {
float n = image->y * image->x;
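// quantize each 8-bit channel down to 2 bits (values 0..3), giving 4*4*4 = 64 histogram bins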
for (int i = 0; i < n; i++) {
image->data[i].red = floor((image->data[i].red * 4) / 256);
image->data[i].blue = floor((image->data[i].blue * 4) / 256);
image->data[i].green = floor((image->data[i].green * 4) / 256);
}
//Allocate GPU memory and copy the inputs to the device******************
PPMPixel *d_data;
float *d_h;
int size_hist = 64 * sizeof(float);
cudaMalloc((void **)&d_data, n * sizeof(PPMPixel));
cudaMalloc((void **)&d_h, size_hist);
cudaMemcpy(d_data, image->data, n * sizeof(PPMPixel), cudaMemcpyHostToDevice);
//******************************************************************
histogram_kernel <<< n_block, n_thread_block >>> (d_data, n, d_h);
//cudaDeviceSynchronize();
//Copy the result back from device to host
cudaMemcpy(h, d_h, size_hist, cudaMemcpyDeviceToHost);
//Free the allocated device memory
cudaFree(d_data);
cudaFree(d_h);
}
int main(int argc, char *argv[]) {
if( argc != 2 ) {
printf("Expected exactly one argument: the input PPM file.\n");
return 1;
}
double t_start, t_end;
int i;
char *filename = argv[1]; //input PPM file name
//scanf("%s", filename);
PPMImage *image = readPPM(filename);
float *h = (float*)malloc(sizeof(float) * 64);
//Initialize the histogram to zero
for(i=0; i < 64; i++) h[i] = 0.0;
t_start = rtclock();
Histogram(image, h);
t_end = rtclock();
for (i = 0; i < 64; i++){
printf("%0.3f ", h[i]);
}
printf("\n");
fprintf(stdout, "\n%0.6lfs\n", t_end - t_start);
free(h);
}
|
308f6baac6c72afd5b32fbbf449427ffa41f3e04.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "calculation.h"
#define MUL3(x) (x & 0x80 ? ((x << 1 ^0x1b) & 0xff ^x) : ((x << 1) ^ x))
#define MUL2(x) (x & 0x80 ? (x << 1 ^0x1b) & 0xff : (x << 1))
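// MUL2/MUL3: multiplication by 2 and 3 in GF(2^8), reduced with the AES polynomial 0x11b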
#define BLOCKSIZE (128)
#define GRIDSIZE ((FILESIZE/16)/BLOCKSIZE) //128*26*32
#define Stream (64)
#define NBb2 (NBb << 1)
texture<int, 1, hipReadModeElementType> pt_texture;
__constant__ int rkey[44];
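// expanded AES-128 key schedule: 11 round keys = 44 32-bit words in constant memory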
__shared__ unsigned char SboxCUDA[256];
__constant__ unsigned char SboxCUDAConst[256] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};
__global__ void device_aes_encrypt(unsigned char *pt, unsigned char *ct) {
//This kernel executes AES encryption on a GPU.
//Please modify this kernel!!
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
/* if (thread_id == 0)
printf("size = %ld\n", size);
// printf("You can use printf function to eliminate bugs in your kernel.\n");
*/
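// stage the 256-byte S-box from constant into shared memory: each of the BLOCKSIZE (128) threads copies two bytes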
memcpy(&(SboxCUDA[threadIdx.x << 1]), &(SboxCUDAConst[threadIdx.x << 1]), 2);
__syncthreads();
unsigned char cb[NBb2];
int *cw = (int *) cb;
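// load this thread's 16-byte block as four 32-bit words and apply the initial AddRoundKey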
cw[0] = ((int *) pt)[thread_id << 2] ^ rkey[0];
cw[1] = ((int *) pt)[thread_id << 2 | 1] ^ rkey[1];
cw[2] = ((int *) pt)[thread_id << 2 | 2] ^ rkey[2];
cw[3] = ((int *) pt)[thread_id << 2 | 3] ^ rkey[3];
/* cw[0] = tex1Dfetch(pt_texture, thread_id << 2) ^ rkey[0];
cw[1] = tex1Dfetch(pt_texture, thread_id << 2 | 1) ^ rkey[1];
cw[2] = tex1Dfetch(pt_texture, thread_id << 2 | 2) ^ rkey[2];
cw[3] = tex1Dfetch(pt_texture, thread_id << 2 | 3) ^ rkey[3];*/
//round 1: each word below fuses SubBytes, ShiftRows, MixColumns and AddRoundKey; the state ping-pongs between the two 16-byte halves of cb[]
cw[4] = (MUL2(SboxCUDA[((unsigned char *) cw)[0]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[5]]) ^
SboxCUDA[((unsigned char *) cw)[10]] ^
SboxCUDA[((unsigned char *) cw)[15]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[5]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[10]]) ^
SboxCUDA[((unsigned char *) cw)[15]] ^
SboxCUDA[((unsigned char *) cw)[0]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[10]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[15]]) ^
SboxCUDA[((unsigned char *) cw)[0]] ^
SboxCUDA[((unsigned char *) cw)[5]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[15]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[0]]) ^
SboxCUDA[((unsigned char *) cw)[5]] ^
SboxCUDA[((unsigned char *) cw)[10]]) << 24)
^ rkey[4];
cw[5] = (MUL2(SboxCUDA[((unsigned char *) cw)[4]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[9]]) ^
SboxCUDA[((unsigned char *) cw)[14]] ^
SboxCUDA[((unsigned char *) cw)[3]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[9]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[14]]) ^
SboxCUDA[((unsigned char *) cw)[3]] ^
SboxCUDA[((unsigned char *) cw)[4]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[14]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[3]]) ^
SboxCUDA[((unsigned char *) cw)[4]] ^
SboxCUDA[((unsigned char *) cw)[9]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[3]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[4]]) ^
SboxCUDA[((unsigned char *) cw)[9]] ^
SboxCUDA[((unsigned char *) cw)[14]]) << 24)
^ rkey[5];
cw[6] = (MUL2(SboxCUDA[((unsigned char *) cw)[8]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[13]]) ^
SboxCUDA[((unsigned char *) cw)[2]] ^
SboxCUDA[((unsigned char *) cw)[7]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[13]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[2]]) ^
SboxCUDA[((unsigned char *) cw)[7]] ^
SboxCUDA[((unsigned char *) cw)[8]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[2]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[7]]) ^
SboxCUDA[((unsigned char *) cw)[8]] ^
SboxCUDA[((unsigned char *) cw)[13]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[7]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[8]]) ^
SboxCUDA[((unsigned char *) cw)[13]] ^
SboxCUDA[((unsigned char *) cw)[2]]) << 24)
^ rkey[6];
cw[7] = (MUL2(SboxCUDA[((unsigned char *) cw)[12]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[1]]) ^
SboxCUDA[((unsigned char *) cw)[6]] ^
SboxCUDA[((unsigned char *) cw)[11]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[1]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[6]]) ^
SboxCUDA[((unsigned char *) cw)[11]] ^
SboxCUDA[((unsigned char *) cw)[12]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[6]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[11]]) ^
SboxCUDA[((unsigned char *) cw)[12]] ^
SboxCUDA[((unsigned char *) cw)[1]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[11]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[12]]) ^
SboxCUDA[((unsigned char *) cw)[1]] ^
SboxCUDA[((unsigned char *) cw)[6]]) << 24)
^ rkey[7];
//round 2
cw[0] = (MUL2(SboxCUDA[((unsigned char *) cw)[16]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[21]]) ^
SboxCUDA[((unsigned char *) cw)[26]] ^
SboxCUDA[((unsigned char *) cw)[31]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[21]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[26]]) ^
SboxCUDA[((unsigned char *) cw)[31]] ^
SboxCUDA[((unsigned char *) cw)[16]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[26]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[31]]) ^
SboxCUDA[((unsigned char *) cw)[16]] ^
SboxCUDA[((unsigned char *) cw)[21]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[31]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[16]]) ^
SboxCUDA[((unsigned char *) cw)[21]] ^
SboxCUDA[((unsigned char *) cw)[26]]) << 24)
^ rkey[8];
cw[1] = (MUL2(SboxCUDA[((unsigned char *) cw)[20]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[25]]) ^
SboxCUDA[((unsigned char *) cw)[30]] ^
SboxCUDA[((unsigned char *) cw)[19]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[25]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[30]]) ^
SboxCUDA[((unsigned char *) cw)[19]] ^
SboxCUDA[((unsigned char *) cw)[20]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[30]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[19]]) ^
SboxCUDA[((unsigned char *) cw)[20]] ^
SboxCUDA[((unsigned char *) cw)[25]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[19]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[20]]) ^
SboxCUDA[((unsigned char *) cw)[25]] ^
SboxCUDA[((unsigned char *) cw)[30]]) << 24)
^ rkey[9];
cw[2] = (MUL2(SboxCUDA[((unsigned char *) cw)[24]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[29]]) ^
SboxCUDA[((unsigned char *) cw)[18]] ^
SboxCUDA[((unsigned char *) cw)[23]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[29]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[18]]) ^
SboxCUDA[((unsigned char *) cw)[23]] ^
SboxCUDA[((unsigned char *) cw)[24]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[18]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[23]]) ^
SboxCUDA[((unsigned char *) cw)[24]] ^
SboxCUDA[((unsigned char *) cw)[29]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[23]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[24]]) ^
SboxCUDA[((unsigned char *) cw)[29]] ^
SboxCUDA[((unsigned char *) cw)[18]]) << 24)
^ rkey[10];
cw[3] = (MUL2(SboxCUDA[((unsigned char *) cw)[28]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[17]]) ^
SboxCUDA[((unsigned char *) cw)[22]] ^
SboxCUDA[((unsigned char *) cw)[27]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[17]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[22]]) ^
SboxCUDA[((unsigned char *) cw)[27]] ^
SboxCUDA[((unsigned char *) cw)[28]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[22]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[27]]) ^
SboxCUDA[((unsigned char *) cw)[28]] ^
SboxCUDA[((unsigned char *) cw)[17]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[27]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[28]]) ^
SboxCUDA[((unsigned char *) cw)[17]] ^
SboxCUDA[((unsigned char *) cw)[22]]) << 24)
^ rkey[11];
//round 3
cw[4] = (MUL2(SboxCUDA[((unsigned char *) cw)[0]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[5]]) ^
SboxCUDA[((unsigned char *) cw)[10]] ^
SboxCUDA[((unsigned char *) cw)[15]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[5]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[10]]) ^
SboxCUDA[((unsigned char *) cw)[15]] ^
SboxCUDA[((unsigned char *) cw)[0]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[10]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[15]]) ^
SboxCUDA[((unsigned char *) cw)[0]] ^
SboxCUDA[((unsigned char *) cw)[5]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[15]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[0]]) ^
SboxCUDA[((unsigned char *) cw)[5]] ^
SboxCUDA[((unsigned char *) cw)[10]]) << 24)
^ rkey[12];
cw[5] = (MUL2(SboxCUDA[((unsigned char *) cw)[4]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[9]]) ^
SboxCUDA[((unsigned char *) cw)[14]] ^
SboxCUDA[((unsigned char *) cw)[3]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[9]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[14]]) ^
SboxCUDA[((unsigned char *) cw)[3]] ^
SboxCUDA[((unsigned char *) cw)[4]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[14]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[3]]) ^
SboxCUDA[((unsigned char *) cw)[4]] ^
SboxCUDA[((unsigned char *) cw)[9]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[3]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[4]]) ^
SboxCUDA[((unsigned char *) cw)[9]] ^
SboxCUDA[((unsigned char *) cw)[14]]) << 24)
^ rkey[13];
cw[6] = (MUL2(SboxCUDA[((unsigned char *) cw)[8]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[13]]) ^
SboxCUDA[((unsigned char *) cw)[2]] ^
SboxCUDA[((unsigned char *) cw)[7]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[13]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[2]]) ^
SboxCUDA[((unsigned char *) cw)[7]] ^
SboxCUDA[((unsigned char *) cw)[8]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[2]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[7]]) ^
SboxCUDA[((unsigned char *) cw)[8]] ^
SboxCUDA[((unsigned char *) cw)[13]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[7]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[8]]) ^
SboxCUDA[((unsigned char *) cw)[13]] ^
SboxCUDA[((unsigned char *) cw)[2]]) << 24)
^ rkey[14];
cw[7] = (MUL2(SboxCUDA[((unsigned char *) cw)[12]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[1]]) ^
SboxCUDA[((unsigned char *) cw)[6]] ^
SboxCUDA[((unsigned char *) cw)[11]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[1]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[6]]) ^
SboxCUDA[((unsigned char *) cw)[11]] ^
SboxCUDA[((unsigned char *) cw)[12]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[6]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[11]]) ^
SboxCUDA[((unsigned char *) cw)[12]] ^
SboxCUDA[((unsigned char *) cw)[1]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[11]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[12]]) ^
SboxCUDA[((unsigned char *) cw)[1]] ^
SboxCUDA[((unsigned char *) cw)[6]]) << 24)
^ rkey[15];
//round 4
cw[0] = (MUL2(SboxCUDA[((unsigned char *) cw)[16]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[21]]) ^
SboxCUDA[((unsigned char *) cw)[26]] ^
SboxCUDA[((unsigned char *) cw)[31]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[21]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[26]]) ^
SboxCUDA[((unsigned char *) cw)[31]] ^
SboxCUDA[((unsigned char *) cw)[16]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[26]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[31]]) ^
SboxCUDA[((unsigned char *) cw)[16]] ^
SboxCUDA[((unsigned char *) cw)[21]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[31]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[16]]) ^
SboxCUDA[((unsigned char *) cw)[21]] ^
SboxCUDA[((unsigned char *) cw)[26]]) << 24)
^ rkey[16];
cw[1] = (MUL2(SboxCUDA[((unsigned char *) cw)[20]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[25]]) ^
SboxCUDA[((unsigned char *) cw)[30]] ^
SboxCUDA[((unsigned char *) cw)[19]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[25]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[30]]) ^
SboxCUDA[((unsigned char *) cw)[19]] ^
SboxCUDA[((unsigned char *) cw)[20]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[30]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[19]]) ^
SboxCUDA[((unsigned char *) cw)[20]] ^
SboxCUDA[((unsigned char *) cw)[25]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[19]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[20]]) ^
SboxCUDA[((unsigned char *) cw)[25]] ^
SboxCUDA[((unsigned char *) cw)[30]]) << 24)
^ rkey[17];
cw[2] = (MUL2(SboxCUDA[((unsigned char *) cw)[24]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[29]]) ^
SboxCUDA[((unsigned char *) cw)[18]] ^
SboxCUDA[((unsigned char *) cw)[23]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[29]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[18]]) ^
SboxCUDA[((unsigned char *) cw)[23]] ^
SboxCUDA[((unsigned char *) cw)[24]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[18]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[23]]) ^
SboxCUDA[((unsigned char *) cw)[24]] ^
SboxCUDA[((unsigned char *) cw)[29]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[23]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[24]]) ^
SboxCUDA[((unsigned char *) cw)[29]] ^
SboxCUDA[((unsigned char *) cw)[18]]) << 24)
^ rkey[18];
cw[3] = (MUL2(SboxCUDA[((unsigned char *) cw)[28]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[17]]) ^
SboxCUDA[((unsigned char *) cw)[22]] ^
SboxCUDA[((unsigned char *) cw)[27]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[17]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[22]]) ^
SboxCUDA[((unsigned char *) cw)[27]] ^
SboxCUDA[((unsigned char *) cw)[28]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[22]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[27]]) ^
SboxCUDA[((unsigned char *) cw)[28]] ^
SboxCUDA[((unsigned char *) cw)[17]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[27]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[28]]) ^
SboxCUDA[((unsigned char *) cw)[17]] ^
SboxCUDA[((unsigned char *) cw)[22]]) << 24)
^ rkey[19];
//round 5
cw[4] = (MUL2(SboxCUDA[((unsigned char *) cw)[0]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[5]]) ^
SboxCUDA[((unsigned char *) cw)[10]] ^
SboxCUDA[((unsigned char *) cw)[15]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[5]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[10]]) ^
SboxCUDA[((unsigned char *) cw)[15]] ^
SboxCUDA[((unsigned char *) cw)[0]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[10]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[15]]) ^
SboxCUDA[((unsigned char *) cw)[0]] ^
SboxCUDA[((unsigned char *) cw)[5]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[15]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[0]]) ^
SboxCUDA[((unsigned char *) cw)[5]] ^
SboxCUDA[((unsigned char *) cw)[10]]) << 24)
^ rkey[20];
cw[5] = (MUL2(SboxCUDA[((unsigned char *) cw)[4]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[9]]) ^
SboxCUDA[((unsigned char *) cw)[14]] ^
SboxCUDA[((unsigned char *) cw)[3]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[9]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[14]]) ^
SboxCUDA[((unsigned char *) cw)[3]] ^
SboxCUDA[((unsigned char *) cw)[4]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[14]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[3]]) ^
SboxCUDA[((unsigned char *) cw)[4]] ^
SboxCUDA[((unsigned char *) cw)[9]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[3]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[4]]) ^
SboxCUDA[((unsigned char *) cw)[9]] ^
SboxCUDA[((unsigned char *) cw)[14]]) << 24)
^ rkey[21];
cw[6] = (MUL2(SboxCUDA[((unsigned char *) cw)[8]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[13]]) ^
SboxCUDA[((unsigned char *) cw)[2]] ^
SboxCUDA[((unsigned char *) cw)[7]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[13]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[2]]) ^
SboxCUDA[((unsigned char *) cw)[7]] ^
SboxCUDA[((unsigned char *) cw)[8]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[2]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[7]]) ^
SboxCUDA[((unsigned char *) cw)[8]] ^
SboxCUDA[((unsigned char *) cw)[13]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[7]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[8]]) ^
SboxCUDA[((unsigned char *) cw)[13]] ^
SboxCUDA[((unsigned char *) cw)[2]]) << 24)
^ rkey[22];
cw[7] = (MUL2(SboxCUDA[((unsigned char *) cw)[12]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[1]]) ^
SboxCUDA[((unsigned char *) cw)[6]] ^
SboxCUDA[((unsigned char *) cw)[11]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[1]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[6]]) ^
SboxCUDA[((unsigned char *) cw)[11]] ^
SboxCUDA[((unsigned char *) cw)[12]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[6]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[11]]) ^
SboxCUDA[((unsigned char *) cw)[12]] ^
SboxCUDA[((unsigned char *) cw)[1]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[11]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[12]]) ^
SboxCUDA[((unsigned char *) cw)[1]] ^
SboxCUDA[((unsigned char *) cw)[6]]) << 24)
^ rkey[23];
//round 6
cw[0] = (MUL2(SboxCUDA[((unsigned char *) cw)[16]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[21]]) ^
SboxCUDA[((unsigned char *) cw)[26]] ^
SboxCUDA[((unsigned char *) cw)[31]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[21]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[26]]) ^
SboxCUDA[((unsigned char *) cw)[31]] ^
SboxCUDA[((unsigned char *) cw)[16]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[26]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[31]]) ^
SboxCUDA[((unsigned char *) cw)[16]] ^
SboxCUDA[((unsigned char *) cw)[21]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[31]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[16]]) ^
SboxCUDA[((unsigned char *) cw)[21]] ^
SboxCUDA[((unsigned char *) cw)[26]]) << 24)
^ rkey[24];
cw[1] = (MUL2(SboxCUDA[((unsigned char *) cw)[20]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[25]]) ^
SboxCUDA[((unsigned char *) cw)[30]] ^
SboxCUDA[((unsigned char *) cw)[19]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[25]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[30]]) ^
SboxCUDA[((unsigned char *) cw)[19]] ^
SboxCUDA[((unsigned char *) cw)[20]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[30]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[19]]) ^
SboxCUDA[((unsigned char *) cw)[20]] ^
SboxCUDA[((unsigned char *) cw)[25]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[19]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[20]]) ^
SboxCUDA[((unsigned char *) cw)[25]] ^
SboxCUDA[((unsigned char *) cw)[30]]) << 24)
^ rkey[25];
cw[2] = (MUL2(SboxCUDA[((unsigned char *) cw)[24]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[29]]) ^
SboxCUDA[((unsigned char *) cw)[18]] ^
SboxCUDA[((unsigned char *) cw)[23]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[29]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[18]]) ^
SboxCUDA[((unsigned char *) cw)[23]] ^
SboxCUDA[((unsigned char *) cw)[24]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[18]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[23]]) ^
SboxCUDA[((unsigned char *) cw)[24]] ^
SboxCUDA[((unsigned char *) cw)[29]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[23]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[24]]) ^
SboxCUDA[((unsigned char *) cw)[29]] ^
SboxCUDA[((unsigned char *) cw)[18]]) << 24)
^ rkey[26];
cw[3] = (MUL2(SboxCUDA[((unsigned char *) cw)[28]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[17]]) ^
SboxCUDA[((unsigned char *) cw)[22]] ^
SboxCUDA[((unsigned char *) cw)[27]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[17]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[22]]) ^
SboxCUDA[((unsigned char *) cw)[27]] ^
SboxCUDA[((unsigned char *) cw)[28]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[22]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[27]]) ^
SboxCUDA[((unsigned char *) cw)[28]] ^
SboxCUDA[((unsigned char *) cw)[17]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[27]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[28]]) ^
SboxCUDA[((unsigned char *) cw)[17]] ^
SboxCUDA[((unsigned char *) cw)[22]]) << 24)
^ rkey[27];
//round 7
cw[4] = (MUL2(SboxCUDA[((unsigned char *) cw)[0]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[5]]) ^
SboxCUDA[((unsigned char *) cw)[10]] ^
SboxCUDA[((unsigned char *) cw)[15]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[5]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[10]]) ^
SboxCUDA[((unsigned char *) cw)[15]] ^
SboxCUDA[((unsigned char *) cw)[0]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[10]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[15]]) ^
SboxCUDA[((unsigned char *) cw)[0]] ^
SboxCUDA[((unsigned char *) cw)[5]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[15]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[0]]) ^
SboxCUDA[((unsigned char *) cw)[5]] ^
SboxCUDA[((unsigned char *) cw)[10]]) << 24)
^ rkey[28];
cw[5] = (MUL2(SboxCUDA[((unsigned char *) cw)[4]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[9]]) ^
SboxCUDA[((unsigned char *) cw)[14]] ^
SboxCUDA[((unsigned char *) cw)[3]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[9]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[14]]) ^
SboxCUDA[((unsigned char *) cw)[3]] ^
SboxCUDA[((unsigned char *) cw)[4]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[14]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[3]]) ^
SboxCUDA[((unsigned char *) cw)[4]] ^
SboxCUDA[((unsigned char *) cw)[9]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[3]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[4]]) ^
SboxCUDA[((unsigned char *) cw)[9]] ^
SboxCUDA[((unsigned char *) cw)[14]]) << 24)
^ rkey[29];
cw[6] = (MUL2(SboxCUDA[((unsigned char *) cw)[8]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[13]]) ^
SboxCUDA[((unsigned char *) cw)[2]] ^
SboxCUDA[((unsigned char *) cw)[7]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[13]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[2]]) ^
SboxCUDA[((unsigned char *) cw)[7]] ^
SboxCUDA[((unsigned char *) cw)[8]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[2]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[7]]) ^
SboxCUDA[((unsigned char *) cw)[8]] ^
SboxCUDA[((unsigned char *) cw)[13]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[7]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[8]]) ^
SboxCUDA[((unsigned char *) cw)[13]] ^
SboxCUDA[((unsigned char *) cw)[2]]) << 24)
^ rkey[30];
cw[7] = (MUL2(SboxCUDA[((unsigned char *) cw)[12]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[1]]) ^
SboxCUDA[((unsigned char *) cw)[6]] ^
SboxCUDA[((unsigned char *) cw)[11]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[1]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[6]]) ^
SboxCUDA[((unsigned char *) cw)[11]] ^
SboxCUDA[((unsigned char *) cw)[12]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[6]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[11]]) ^
SboxCUDA[((unsigned char *) cw)[12]] ^
SboxCUDA[((unsigned char *) cw)[1]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[11]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[12]]) ^
SboxCUDA[((unsigned char *) cw)[1]] ^
SboxCUDA[((unsigned char *) cw)[6]]) << 24)
^ rkey[31];
//round 8
cw[0] = (MUL2(SboxCUDA[((unsigned char *) cw)[16]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[21]]) ^
SboxCUDA[((unsigned char *) cw)[26]] ^
SboxCUDA[((unsigned char *) cw)[31]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[21]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[26]]) ^
SboxCUDA[((unsigned char *) cw)[31]] ^
SboxCUDA[((unsigned char *) cw)[16]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[26]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[31]]) ^
SboxCUDA[((unsigned char *) cw)[16]] ^
SboxCUDA[((unsigned char *) cw)[21]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[31]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[16]]) ^
SboxCUDA[((unsigned char *) cw)[21]] ^
SboxCUDA[((unsigned char *) cw)[26]]) << 24)
^ rkey[32];
cw[1] = (MUL2(SboxCUDA[((unsigned char *) cw)[20]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[25]]) ^
SboxCUDA[((unsigned char *) cw)[30]] ^
SboxCUDA[((unsigned char *) cw)[19]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[25]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[30]]) ^
SboxCUDA[((unsigned char *) cw)[19]] ^
SboxCUDA[((unsigned char *) cw)[20]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[30]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[19]]) ^
SboxCUDA[((unsigned char *) cw)[20]] ^
SboxCUDA[((unsigned char *) cw)[25]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[19]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[20]]) ^
SboxCUDA[((unsigned char *) cw)[25]] ^
SboxCUDA[((unsigned char *) cw)[30]]) << 24)
^ rkey[33];
cw[2] = (MUL2(SboxCUDA[((unsigned char *) cw)[24]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[29]]) ^
SboxCUDA[((unsigned char *) cw)[18]] ^
SboxCUDA[((unsigned char *) cw)[23]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[29]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[18]]) ^
SboxCUDA[((unsigned char *) cw)[23]] ^
SboxCUDA[((unsigned char *) cw)[24]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[18]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[23]]) ^
SboxCUDA[((unsigned char *) cw)[24]] ^
SboxCUDA[((unsigned char *) cw)[29]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[23]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[24]]) ^
SboxCUDA[((unsigned char *) cw)[29]] ^
SboxCUDA[((unsigned char *) cw)[18]]) << 24)
^ rkey[34];
cw[3] = (MUL2(SboxCUDA[((unsigned char *) cw)[28]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[17]]) ^
SboxCUDA[((unsigned char *) cw)[22]] ^
SboxCUDA[((unsigned char *) cw)[27]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[17]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[22]]) ^
SboxCUDA[((unsigned char *) cw)[27]] ^
SboxCUDA[((unsigned char *) cw)[28]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[22]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[27]]) ^
SboxCUDA[((unsigned char *) cw)[28]] ^
SboxCUDA[((unsigned char *) cw)[17]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[27]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[28]]) ^
SboxCUDA[((unsigned char *) cw)[17]] ^
SboxCUDA[((unsigned char *) cw)[22]]) << 24)
^ rkey[35];
//round 9
cw[4] = (MUL2(SboxCUDA[((unsigned char *) cw)[0]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[5]]) ^
SboxCUDA[((unsigned char *) cw)[10]] ^
SboxCUDA[((unsigned char *) cw)[15]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[5]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[10]]) ^
SboxCUDA[((unsigned char *) cw)[15]] ^
SboxCUDA[((unsigned char *) cw)[0]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[10]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[15]]) ^
SboxCUDA[((unsigned char *) cw)[0]] ^
SboxCUDA[((unsigned char *) cw)[5]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[15]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[0]]) ^
SboxCUDA[((unsigned char *) cw)[5]] ^
SboxCUDA[((unsigned char *) cw)[10]]) << 24)
^ rkey[36];
cw[5] = (MUL2(SboxCUDA[((unsigned char *) cw)[4]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[9]]) ^
SboxCUDA[((unsigned char *) cw)[14]] ^
SboxCUDA[((unsigned char *) cw)[3]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[9]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[14]]) ^
SboxCUDA[((unsigned char *) cw)[3]] ^
SboxCUDA[((unsigned char *) cw)[4]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[14]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[3]]) ^
SboxCUDA[((unsigned char *) cw)[4]] ^
SboxCUDA[((unsigned char *) cw)[9]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[3]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[4]]) ^
SboxCUDA[((unsigned char *) cw)[9]] ^
SboxCUDA[((unsigned char *) cw)[14]]) << 24)
^ rkey[37];
cw[6] = (MUL2(SboxCUDA[((unsigned char *) cw)[8]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[13]]) ^
SboxCUDA[((unsigned char *) cw)[2]] ^
SboxCUDA[((unsigned char *) cw)[7]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[13]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[2]]) ^
SboxCUDA[((unsigned char *) cw)[7]] ^
SboxCUDA[((unsigned char *) cw)[8]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[2]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[7]]) ^
SboxCUDA[((unsigned char *) cw)[8]] ^
SboxCUDA[((unsigned char *) cw)[13]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[7]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[8]]) ^
SboxCUDA[((unsigned char *) cw)[13]] ^
SboxCUDA[((unsigned char *) cw)[2]]) << 24)
^ rkey[38];
cw[7] = (MUL2(SboxCUDA[((unsigned char *) cw)[12]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[1]]) ^
SboxCUDA[((unsigned char *) cw)[6]] ^
SboxCUDA[((unsigned char *) cw)[11]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[1]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[6]]) ^
SboxCUDA[((unsigned char *) cw)[11]] ^
SboxCUDA[((unsigned char *) cw)[12]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[6]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[11]]) ^
SboxCUDA[((unsigned char *) cw)[12]] ^
SboxCUDA[((unsigned char *) cw)[1]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[11]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[12]]) ^
SboxCUDA[((unsigned char *) cw)[1]] ^
SboxCUDA[((unsigned char *) cw)[6]]) << 24)
^ rkey[39];
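    // Final round: SubBytes + ShiftRows + AddRoundKey only (no MixColumns). cb[16..31] holds
    // the round-9 state; the substituted bytes land in cb[0..15] (cw[0..3]) and are XORed
    // with the last round key rkey[40..43] as they are stored to ct.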
cb[0] = SboxCUDA[cb[16]];
cb[1] = SboxCUDA[cb[21]];
cb[2] = SboxCUDA[cb[26]];
cb[3] = SboxCUDA[cb[31]];
((int *) ct)[thread_id << 2] = cw[0] ^ rkey[40];
cb[4] = SboxCUDA[cb[20]];
cb[5] = SboxCUDA[cb[25]];
cb[6] = SboxCUDA[cb[30]];
cb[7] = SboxCUDA[cb[19]];
((int *) ct)[thread_id << 2 | 1] = cw[1] ^ rkey[41];
cb[8] = SboxCUDA[cb[24]];
cb[9] = SboxCUDA[cb[29]];
cb[10] = SboxCUDA[cb[18]];
cb[11] = SboxCUDA[cb[23]];
((int *) ct)[thread_id << 2 | 2] = cw[2] ^ rkey[42];
cb[12] = SboxCUDA[cb[28]];
cb[13] = SboxCUDA[cb[17]];
cb[14] = SboxCUDA[cb[22]];
cb[15] = SboxCUDA[cb[27]];
((int *) ct)[thread_id << 2 | 3] = cw[3] ^ rkey[43];
/* if (thread_id == 0) {
printf("state0: 0x%x\n", ((int *) ct)[thread_id << 2]);
printf("state1: 0x%x\n", ((int *) ct)[thread_id << 2|1]);
printf("state2: 0x%x\n", ((int *) ct)[thread_id << 2|2]);
printf("state3: 0x%x\n", ((int *) ct)[thread_id << 2|3]);
}*/
}
void launch_aes_kernel(unsigned char *pt, int *rk, unsigned char *ct, long int size) {
//This function launches the AES kernel.
//Please modify this function for AES kernel.
//In this function, you need to allocate the device memory and so on.
unsigned char *d_ct, *d_pt;
// int *d_pt;
long int size2 = size >> 6;
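    // size2 is the per-stream chunk in bytes: size / Stream (Stream == 64, hence the >> 6).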
hipStream_t stream[Stream];
dim3 dim_grid(GRIDSIZE >> 6, 1, 1), dim_block(BLOCKSIZE, 1, 1);
hipHostRegister(pt, size, hipHostRegisterDefault);
hipHostRegister(ct, size, hipHostRegisterDefault);
hipMalloc((void **) &d_pt, size);
hipMalloc((void **) &d_ct, size);
hipMemcpyToSymbol(rkey, rk, 176);
hipStreamCreateWithFlags(&stream[0], hipStreamNonBlocking);
hipMemcpyAsync(d_pt, pt, size2, hipMemcpyHostToDevice, stream[0]);
// hipBindTexture(NULL, pt_texture, d_pt);
int i;
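    // Software pipeline: for chunk i, launch the kernel and its device-to-host copy on
    // stream[i], then create stream[i+1] and start the next chunk's host-to-device copy so
    // that transfer overlaps with chunk i's compute.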
for (i = 0; i < Stream; i++) {
hipLaunchKernelGGL(( device_aes_encrypt) , dim3(dim_grid), dim3(dim_block), 0, stream[i] , d_pt + size2 * i, d_ct + size2 * i);
hipMemcpyAsync(ct + size2 * i, d_ct + size2 * i, size2, hipMemcpyDeviceToHost, stream[i]);
if (i != Stream - 1) {
hipStreamCreateWithFlags(&stream[i + 1], hipStreamNonBlocking);
hipMemcpyAsync(d_pt + size2 * (i + 1), pt + size2 * (i + 1), size2, hipMemcpyHostToDevice, stream[i + 1]);
}
}
// hipUnbindTexture(pt_texture);
int stm;
    for (stm = 0; stm < Stream; stm++) {
        hipStreamSynchronize(stream[stm]); // ensure the async copies into ct have completed before unregister/free
        hipStreamDestroy(stream[stm]);
    }
hipHostUnregister(pt);
hipHostUnregister(ct);
hipFree(d_pt);
hipFree(d_ct);
}
| 308f6baac6c72afd5b32fbbf449427ffa41f3e04.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "calculation.h"
#define MUL3(x) (x & 0x80 ? ((x << 1 ^0x1b) & 0xff ^x) : ((x << 1) ^ x))
#define MUL2(x) (x & 0x80 ? (x << 1 ^0x1b) & 0xff : (x << 1))
#define BLOCKSIZE (128)
#define GRIDSIZE ((FILESIZE/16)/BLOCKSIZE) //128*26*32
#define Stream (64)
#define NBb2 (NBb << 1)
texture<int, 1, cudaReadModeElementType> pt_texture;
__constant__ int rkey[44];
__shared__ unsigned char SboxCUDA[256];
__constant__ unsigned char SboxCUDAConst[256] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};
__global__ void device_aes_encrypt(unsigned char *pt, unsigned char *ct) {
//This kernel executes AES encryption on a GPU.
//Please modify this kernel!!
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
/* if (thread_id == 0)
printf("size = %ld\n", size);
// printf("You can use printf function to eliminate bugs in your kernel.\n");
*/
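    // Stage the 256-byte S-box into shared memory: each of the BLOCKSIZE (128) threads copies
    // 2 bytes, then the block synchronizes. Each thread then encrypts one 16-byte block,
    // loaded below as four 32-bit words.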
memcpy(&(SboxCUDA[threadIdx.x << 1]), &(SboxCUDAConst[threadIdx.x << 1]), 2);
__syncthreads();
unsigned char cb[NBb2];
int *cw = (int *) cb;
cw[0] = ((int *) pt)[thread_id << 2] ^ rkey[0];
cw[1] = ((int *) pt)[thread_id << 2 | 1] ^ rkey[1];
cw[2] = ((int *) pt)[thread_id << 2 | 2] ^ rkey[2];
cw[3] = ((int *) pt)[thread_id << 2 | 3] ^ rkey[3];
/* cw[0] = tex1Dfetch(pt_texture, thread_id << 2) ^ rkey[0];
cw[1] = tex1Dfetch(pt_texture, thread_id << 2 | 1) ^ rkey[1];
cw[2] = tex1Dfetch(pt_texture, thread_id << 2 | 2) ^ rkey[2];
cw[3] = tex1Dfetch(pt_texture, thread_id << 2 | 3) ^ rkey[3];*/
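    // Rounds 1-9 below are fully unrolled. Each 32-bit word fuses SubBytes (the S-box lookups),
    // ShiftRows (the staggered byte indices 0/5/10/15, 4/9/14/3, ...), MixColumns (the
    // MUL2/MUL3 xtime terms) and AddRoundKey (the trailing XOR with rkey[]). The state
    // ping-pongs between cw[0..3] and cw[4..7], so no separate temporary block is needed.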
//round 1
cw[4] = (MUL2(SboxCUDA[((unsigned char *) cw)[0]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[5]]) ^
SboxCUDA[((unsigned char *) cw)[10]] ^
SboxCUDA[((unsigned char *) cw)[15]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[5]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[10]]) ^
SboxCUDA[((unsigned char *) cw)[15]] ^
SboxCUDA[((unsigned char *) cw)[0]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[10]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[15]]) ^
SboxCUDA[((unsigned char *) cw)[0]] ^
SboxCUDA[((unsigned char *) cw)[5]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[15]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[0]]) ^
SboxCUDA[((unsigned char *) cw)[5]] ^
SboxCUDA[((unsigned char *) cw)[10]]) << 24)
^ rkey[4];
cw[5] = (MUL2(SboxCUDA[((unsigned char *) cw)[4]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[9]]) ^
SboxCUDA[((unsigned char *) cw)[14]] ^
SboxCUDA[((unsigned char *) cw)[3]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[9]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[14]]) ^
SboxCUDA[((unsigned char *) cw)[3]] ^
SboxCUDA[((unsigned char *) cw)[4]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[14]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[3]]) ^
SboxCUDA[((unsigned char *) cw)[4]] ^
SboxCUDA[((unsigned char *) cw)[9]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[3]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[4]]) ^
SboxCUDA[((unsigned char *) cw)[9]] ^
SboxCUDA[((unsigned char *) cw)[14]]) << 24)
^ rkey[5];
cw[6] = (MUL2(SboxCUDA[((unsigned char *) cw)[8]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[13]]) ^
SboxCUDA[((unsigned char *) cw)[2]] ^
SboxCUDA[((unsigned char *) cw)[7]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[13]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[2]]) ^
SboxCUDA[((unsigned char *) cw)[7]] ^
SboxCUDA[((unsigned char *) cw)[8]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[2]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[7]]) ^
SboxCUDA[((unsigned char *) cw)[8]] ^
SboxCUDA[((unsigned char *) cw)[13]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[7]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[8]]) ^
SboxCUDA[((unsigned char *) cw)[13]] ^
SboxCUDA[((unsigned char *) cw)[2]]) << 24)
^ rkey[6];
cw[7] = (MUL2(SboxCUDA[((unsigned char *) cw)[12]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[1]]) ^
SboxCUDA[((unsigned char *) cw)[6]] ^
SboxCUDA[((unsigned char *) cw)[11]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[1]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[6]]) ^
SboxCUDA[((unsigned char *) cw)[11]] ^
SboxCUDA[((unsigned char *) cw)[12]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[6]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[11]]) ^
SboxCUDA[((unsigned char *) cw)[12]] ^
SboxCUDA[((unsigned char *) cw)[1]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[11]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[12]]) ^
SboxCUDA[((unsigned char *) cw)[1]] ^
SboxCUDA[((unsigned char *) cw)[6]]) << 24)
^ rkey[7];
//round 2
cw[0] = (MUL2(SboxCUDA[((unsigned char *) cw)[16]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[21]]) ^
SboxCUDA[((unsigned char *) cw)[26]] ^
SboxCUDA[((unsigned char *) cw)[31]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[21]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[26]]) ^
SboxCUDA[((unsigned char *) cw)[31]] ^
SboxCUDA[((unsigned char *) cw)[16]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[26]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[31]]) ^
SboxCUDA[((unsigned char *) cw)[16]] ^
SboxCUDA[((unsigned char *) cw)[21]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[31]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[16]]) ^
SboxCUDA[((unsigned char *) cw)[21]] ^
SboxCUDA[((unsigned char *) cw)[26]]) << 24)
^ rkey[8];
cw[1] = (MUL2(SboxCUDA[((unsigned char *) cw)[20]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[25]]) ^
SboxCUDA[((unsigned char *) cw)[30]] ^
SboxCUDA[((unsigned char *) cw)[19]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[25]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[30]]) ^
SboxCUDA[((unsigned char *) cw)[19]] ^
SboxCUDA[((unsigned char *) cw)[20]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[30]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[19]]) ^
SboxCUDA[((unsigned char *) cw)[20]] ^
SboxCUDA[((unsigned char *) cw)[25]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[19]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[20]]) ^
SboxCUDA[((unsigned char *) cw)[25]] ^
SboxCUDA[((unsigned char *) cw)[30]]) << 24)
^ rkey[9];
cw[2] = (MUL2(SboxCUDA[((unsigned char *) cw)[24]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[29]]) ^
SboxCUDA[((unsigned char *) cw)[18]] ^
SboxCUDA[((unsigned char *) cw)[23]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[29]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[18]]) ^
SboxCUDA[((unsigned char *) cw)[23]] ^
SboxCUDA[((unsigned char *) cw)[24]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[18]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[23]]) ^
SboxCUDA[((unsigned char *) cw)[24]] ^
SboxCUDA[((unsigned char *) cw)[29]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[23]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[24]]) ^
SboxCUDA[((unsigned char *) cw)[29]] ^
SboxCUDA[((unsigned char *) cw)[18]]) << 24)
^ rkey[10];
cw[3] = (MUL2(SboxCUDA[((unsigned char *) cw)[28]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[17]]) ^
SboxCUDA[((unsigned char *) cw)[22]] ^
SboxCUDA[((unsigned char *) cw)[27]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[17]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[22]]) ^
SboxCUDA[((unsigned char *) cw)[27]] ^
SboxCUDA[((unsigned char *) cw)[28]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[22]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[27]]) ^
SboxCUDA[((unsigned char *) cw)[28]] ^
SboxCUDA[((unsigned char *) cw)[17]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[27]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[28]]) ^
SboxCUDA[((unsigned char *) cw)[17]] ^
SboxCUDA[((unsigned char *) cw)[22]]) << 24)
^ rkey[11];
//round 3
cw[4] = (MUL2(SboxCUDA[((unsigned char *) cw)[0]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[5]]) ^
SboxCUDA[((unsigned char *) cw)[10]] ^
SboxCUDA[((unsigned char *) cw)[15]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[5]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[10]]) ^
SboxCUDA[((unsigned char *) cw)[15]] ^
SboxCUDA[((unsigned char *) cw)[0]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[10]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[15]]) ^
SboxCUDA[((unsigned char *) cw)[0]] ^
SboxCUDA[((unsigned char *) cw)[5]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[15]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[0]]) ^
SboxCUDA[((unsigned char *) cw)[5]] ^
SboxCUDA[((unsigned char *) cw)[10]]) << 24)
^ rkey[12];
cw[5] = (MUL2(SboxCUDA[((unsigned char *) cw)[4]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[9]]) ^
SboxCUDA[((unsigned char *) cw)[14]] ^
SboxCUDA[((unsigned char *) cw)[3]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[9]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[14]]) ^
SboxCUDA[((unsigned char *) cw)[3]] ^
SboxCUDA[((unsigned char *) cw)[4]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[14]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[3]]) ^
SboxCUDA[((unsigned char *) cw)[4]] ^
SboxCUDA[((unsigned char *) cw)[9]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[3]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[4]]) ^
SboxCUDA[((unsigned char *) cw)[9]] ^
SboxCUDA[((unsigned char *) cw)[14]]) << 24)
^ rkey[13];
cw[6] = (MUL2(SboxCUDA[((unsigned char *) cw)[8]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[13]]) ^
SboxCUDA[((unsigned char *) cw)[2]] ^
SboxCUDA[((unsigned char *) cw)[7]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[13]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[2]]) ^
SboxCUDA[((unsigned char *) cw)[7]] ^
SboxCUDA[((unsigned char *) cw)[8]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[2]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[7]]) ^
SboxCUDA[((unsigned char *) cw)[8]] ^
SboxCUDA[((unsigned char *) cw)[13]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[7]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[8]]) ^
SboxCUDA[((unsigned char *) cw)[13]] ^
SboxCUDA[((unsigned char *) cw)[2]]) << 24)
^ rkey[14];
cw[7] = (MUL2(SboxCUDA[((unsigned char *) cw)[12]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[1]]) ^
SboxCUDA[((unsigned char *) cw)[6]] ^
SboxCUDA[((unsigned char *) cw)[11]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[1]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[6]]) ^
SboxCUDA[((unsigned char *) cw)[11]] ^
SboxCUDA[((unsigned char *) cw)[12]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[6]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[11]]) ^
SboxCUDA[((unsigned char *) cw)[12]] ^
SboxCUDA[((unsigned char *) cw)[1]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[11]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[12]]) ^
SboxCUDA[((unsigned char *) cw)[1]] ^
SboxCUDA[((unsigned char *) cw)[6]]) << 24)
^ rkey[15];
//round 4
cw[0] = (MUL2(SboxCUDA[((unsigned char *) cw)[16]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[21]]) ^
SboxCUDA[((unsigned char *) cw)[26]] ^
SboxCUDA[((unsigned char *) cw)[31]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[21]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[26]]) ^
SboxCUDA[((unsigned char *) cw)[31]] ^
SboxCUDA[((unsigned char *) cw)[16]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[26]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[31]]) ^
SboxCUDA[((unsigned char *) cw)[16]] ^
SboxCUDA[((unsigned char *) cw)[21]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[31]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[16]]) ^
SboxCUDA[((unsigned char *) cw)[21]] ^
SboxCUDA[((unsigned char *) cw)[26]]) << 24)
^ rkey[16];
cw[1] = (MUL2(SboxCUDA[((unsigned char *) cw)[20]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[25]]) ^
SboxCUDA[((unsigned char *) cw)[30]] ^
SboxCUDA[((unsigned char *) cw)[19]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[25]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[30]]) ^
SboxCUDA[((unsigned char *) cw)[19]] ^
SboxCUDA[((unsigned char *) cw)[20]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[30]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[19]]) ^
SboxCUDA[((unsigned char *) cw)[20]] ^
SboxCUDA[((unsigned char *) cw)[25]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[19]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[20]]) ^
SboxCUDA[((unsigned char *) cw)[25]] ^
SboxCUDA[((unsigned char *) cw)[30]]) << 24)
^ rkey[17];
cw[2] = (MUL2(SboxCUDA[((unsigned char *) cw)[24]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[29]]) ^
SboxCUDA[((unsigned char *) cw)[18]] ^
SboxCUDA[((unsigned char *) cw)[23]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[29]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[18]]) ^
SboxCUDA[((unsigned char *) cw)[23]] ^
SboxCUDA[((unsigned char *) cw)[24]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[18]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[23]]) ^
SboxCUDA[((unsigned char *) cw)[24]] ^
SboxCUDA[((unsigned char *) cw)[29]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[23]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[24]]) ^
SboxCUDA[((unsigned char *) cw)[29]] ^
SboxCUDA[((unsigned char *) cw)[18]]) << 24)
^ rkey[18];
cw[3] = (MUL2(SboxCUDA[((unsigned char *) cw)[28]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[17]]) ^
SboxCUDA[((unsigned char *) cw)[22]] ^
SboxCUDA[((unsigned char *) cw)[27]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[17]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[22]]) ^
SboxCUDA[((unsigned char *) cw)[27]] ^
SboxCUDA[((unsigned char *) cw)[28]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[22]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[27]]) ^
SboxCUDA[((unsigned char *) cw)[28]] ^
SboxCUDA[((unsigned char *) cw)[17]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[27]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[28]]) ^
SboxCUDA[((unsigned char *) cw)[17]] ^
SboxCUDA[((unsigned char *) cw)[22]]) << 24)
^ rkey[19];
//round 5
cw[4] = (MUL2(SboxCUDA[((unsigned char *) cw)[0]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[5]]) ^
SboxCUDA[((unsigned char *) cw)[10]] ^
SboxCUDA[((unsigned char *) cw)[15]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[5]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[10]]) ^
SboxCUDA[((unsigned char *) cw)[15]] ^
SboxCUDA[((unsigned char *) cw)[0]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[10]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[15]]) ^
SboxCUDA[((unsigned char *) cw)[0]] ^
SboxCUDA[((unsigned char *) cw)[5]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[15]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[0]]) ^
SboxCUDA[((unsigned char *) cw)[5]] ^
SboxCUDA[((unsigned char *) cw)[10]]) << 24)
^ rkey[20];
cw[5] = (MUL2(SboxCUDA[((unsigned char *) cw)[4]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[9]]) ^
SboxCUDA[((unsigned char *) cw)[14]] ^
SboxCUDA[((unsigned char *) cw)[3]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[9]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[14]]) ^
SboxCUDA[((unsigned char *) cw)[3]] ^
SboxCUDA[((unsigned char *) cw)[4]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[14]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[3]]) ^
SboxCUDA[((unsigned char *) cw)[4]] ^
SboxCUDA[((unsigned char *) cw)[9]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[3]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[4]]) ^
SboxCUDA[((unsigned char *) cw)[9]] ^
SboxCUDA[((unsigned char *) cw)[14]]) << 24)
^ rkey[21];
cw[6] = (MUL2(SboxCUDA[((unsigned char *) cw)[8]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[13]]) ^
SboxCUDA[((unsigned char *) cw)[2]] ^
SboxCUDA[((unsigned char *) cw)[7]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[13]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[2]]) ^
SboxCUDA[((unsigned char *) cw)[7]] ^
SboxCUDA[((unsigned char *) cw)[8]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[2]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[7]]) ^
SboxCUDA[((unsigned char *) cw)[8]] ^
SboxCUDA[((unsigned char *) cw)[13]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[7]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[8]]) ^
SboxCUDA[((unsigned char *) cw)[13]] ^
SboxCUDA[((unsigned char *) cw)[2]]) << 24)
^ rkey[22];
cw[7] = (MUL2(SboxCUDA[((unsigned char *) cw)[12]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[1]]) ^
SboxCUDA[((unsigned char *) cw)[6]] ^
SboxCUDA[((unsigned char *) cw)[11]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[1]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[6]]) ^
SboxCUDA[((unsigned char *) cw)[11]] ^
SboxCUDA[((unsigned char *) cw)[12]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[6]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[11]]) ^
SboxCUDA[((unsigned char *) cw)[12]] ^
SboxCUDA[((unsigned char *) cw)[1]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[11]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[12]]) ^
SboxCUDA[((unsigned char *) cw)[1]] ^
SboxCUDA[((unsigned char *) cw)[6]]) << 24)
^ rkey[23];
//round 6
cw[0] = (MUL2(SboxCUDA[((unsigned char *) cw)[16]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[21]]) ^
SboxCUDA[((unsigned char *) cw)[26]] ^
SboxCUDA[((unsigned char *) cw)[31]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[21]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[26]]) ^
SboxCUDA[((unsigned char *) cw)[31]] ^
SboxCUDA[((unsigned char *) cw)[16]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[26]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[31]]) ^
SboxCUDA[((unsigned char *) cw)[16]] ^
SboxCUDA[((unsigned char *) cw)[21]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[31]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[16]]) ^
SboxCUDA[((unsigned char *) cw)[21]] ^
SboxCUDA[((unsigned char *) cw)[26]]) << 24)
^ rkey[24];
cw[1] = (MUL2(SboxCUDA[((unsigned char *) cw)[20]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[25]]) ^
SboxCUDA[((unsigned char *) cw)[30]] ^
SboxCUDA[((unsigned char *) cw)[19]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[25]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[30]]) ^
SboxCUDA[((unsigned char *) cw)[19]] ^
SboxCUDA[((unsigned char *) cw)[20]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[30]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[19]]) ^
SboxCUDA[((unsigned char *) cw)[20]] ^
SboxCUDA[((unsigned char *) cw)[25]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[19]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[20]]) ^
SboxCUDA[((unsigned char *) cw)[25]] ^
SboxCUDA[((unsigned char *) cw)[30]]) << 24)
^ rkey[25];
cw[2] = (MUL2(SboxCUDA[((unsigned char *) cw)[24]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[29]]) ^
SboxCUDA[((unsigned char *) cw)[18]] ^
SboxCUDA[((unsigned char *) cw)[23]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[29]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[18]]) ^
SboxCUDA[((unsigned char *) cw)[23]] ^
SboxCUDA[((unsigned char *) cw)[24]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[18]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[23]]) ^
SboxCUDA[((unsigned char *) cw)[24]] ^
SboxCUDA[((unsigned char *) cw)[29]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[23]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[24]]) ^
SboxCUDA[((unsigned char *) cw)[29]] ^
SboxCUDA[((unsigned char *) cw)[18]]) << 24)
^ rkey[26];
cw[3] = (MUL2(SboxCUDA[((unsigned char *) cw)[28]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[17]]) ^
SboxCUDA[((unsigned char *) cw)[22]] ^
SboxCUDA[((unsigned char *) cw)[27]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[17]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[22]]) ^
SboxCUDA[((unsigned char *) cw)[27]] ^
SboxCUDA[((unsigned char *) cw)[28]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[22]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[27]]) ^
SboxCUDA[((unsigned char *) cw)[28]] ^
SboxCUDA[((unsigned char *) cw)[17]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[27]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[28]]) ^
SboxCUDA[((unsigned char *) cw)[17]] ^
SboxCUDA[((unsigned char *) cw)[22]]) << 24)
^ rkey[27];
//round 7
cw[4] = (MUL2(SboxCUDA[((unsigned char *) cw)[0]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[5]]) ^
SboxCUDA[((unsigned char *) cw)[10]] ^
SboxCUDA[((unsigned char *) cw)[15]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[5]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[10]]) ^
SboxCUDA[((unsigned char *) cw)[15]] ^
SboxCUDA[((unsigned char *) cw)[0]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[10]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[15]]) ^
SboxCUDA[((unsigned char *) cw)[0]] ^
SboxCUDA[((unsigned char *) cw)[5]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[15]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[0]]) ^
SboxCUDA[((unsigned char *) cw)[5]] ^
SboxCUDA[((unsigned char *) cw)[10]]) << 24)
^ rkey[28];
cw[5] = (MUL2(SboxCUDA[((unsigned char *) cw)[4]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[9]]) ^
SboxCUDA[((unsigned char *) cw)[14]] ^
SboxCUDA[((unsigned char *) cw)[3]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[9]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[14]]) ^
SboxCUDA[((unsigned char *) cw)[3]] ^
SboxCUDA[((unsigned char *) cw)[4]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[14]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[3]]) ^
SboxCUDA[((unsigned char *) cw)[4]] ^
SboxCUDA[((unsigned char *) cw)[9]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[3]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[4]]) ^
SboxCUDA[((unsigned char *) cw)[9]] ^
SboxCUDA[((unsigned char *) cw)[14]]) << 24)
^ rkey[29];
cw[6] = (MUL2(SboxCUDA[((unsigned char *) cw)[8]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[13]]) ^
SboxCUDA[((unsigned char *) cw)[2]] ^
SboxCUDA[((unsigned char *) cw)[7]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[13]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[2]]) ^
SboxCUDA[((unsigned char *) cw)[7]] ^
SboxCUDA[((unsigned char *) cw)[8]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[2]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[7]]) ^
SboxCUDA[((unsigned char *) cw)[8]] ^
SboxCUDA[((unsigned char *) cw)[13]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[7]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[8]]) ^
SboxCUDA[((unsigned char *) cw)[13]] ^
SboxCUDA[((unsigned char *) cw)[2]]) << 24)
^ rkey[30];
cw[7] = (MUL2(SboxCUDA[((unsigned char *) cw)[12]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[1]]) ^
SboxCUDA[((unsigned char *) cw)[6]] ^
SboxCUDA[((unsigned char *) cw)[11]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[1]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[6]]) ^
SboxCUDA[((unsigned char *) cw)[11]] ^
SboxCUDA[((unsigned char *) cw)[12]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[6]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[11]]) ^
SboxCUDA[((unsigned char *) cw)[12]] ^
SboxCUDA[((unsigned char *) cw)[1]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[11]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[12]]) ^
SboxCUDA[((unsigned char *) cw)[1]] ^
SboxCUDA[((unsigned char *) cw)[6]]) << 24)
^ rkey[31];
//round 8
cw[0] = (MUL2(SboxCUDA[((unsigned char *) cw)[16]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[21]]) ^
SboxCUDA[((unsigned char *) cw)[26]] ^
SboxCUDA[((unsigned char *) cw)[31]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[21]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[26]]) ^
SboxCUDA[((unsigned char *) cw)[31]] ^
SboxCUDA[((unsigned char *) cw)[16]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[26]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[31]]) ^
SboxCUDA[((unsigned char *) cw)[16]] ^
SboxCUDA[((unsigned char *) cw)[21]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[31]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[16]]) ^
SboxCUDA[((unsigned char *) cw)[21]] ^
SboxCUDA[((unsigned char *) cw)[26]]) << 24)
^ rkey[32];
cw[1] = (MUL2(SboxCUDA[((unsigned char *) cw)[20]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[25]]) ^
SboxCUDA[((unsigned char *) cw)[30]] ^
SboxCUDA[((unsigned char *) cw)[19]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[25]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[30]]) ^
SboxCUDA[((unsigned char *) cw)[19]] ^
SboxCUDA[((unsigned char *) cw)[20]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[30]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[19]]) ^
SboxCUDA[((unsigned char *) cw)[20]] ^
SboxCUDA[((unsigned char *) cw)[25]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[19]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[20]]) ^
SboxCUDA[((unsigned char *) cw)[25]] ^
SboxCUDA[((unsigned char *) cw)[30]]) << 24)
^ rkey[33];
cw[2] = (MUL2(SboxCUDA[((unsigned char *) cw)[24]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[29]]) ^
SboxCUDA[((unsigned char *) cw)[18]] ^
SboxCUDA[((unsigned char *) cw)[23]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[29]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[18]]) ^
SboxCUDA[((unsigned char *) cw)[23]] ^
SboxCUDA[((unsigned char *) cw)[24]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[18]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[23]]) ^
SboxCUDA[((unsigned char *) cw)[24]] ^
SboxCUDA[((unsigned char *) cw)[29]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[23]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[24]]) ^
SboxCUDA[((unsigned char *) cw)[29]] ^
SboxCUDA[((unsigned char *) cw)[18]]) << 24)
^ rkey[34];
cw[3] = (MUL2(SboxCUDA[((unsigned char *) cw)[28]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[17]]) ^
SboxCUDA[((unsigned char *) cw)[22]] ^
SboxCUDA[((unsigned char *) cw)[27]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[17]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[22]]) ^
SboxCUDA[((unsigned char *) cw)[27]] ^
SboxCUDA[((unsigned char *) cw)[28]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[22]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[27]]) ^
SboxCUDA[((unsigned char *) cw)[28]] ^
SboxCUDA[((unsigned char *) cw)[17]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[27]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[28]]) ^
SboxCUDA[((unsigned char *) cw)[17]] ^
SboxCUDA[((unsigned char *) cw)[22]]) << 24)
^ rkey[35];
//round 9
cw[4] = (MUL2(SboxCUDA[((unsigned char *) cw)[0]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[5]]) ^
SboxCUDA[((unsigned char *) cw)[10]] ^
SboxCUDA[((unsigned char *) cw)[15]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[5]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[10]]) ^
SboxCUDA[((unsigned char *) cw)[15]] ^
SboxCUDA[((unsigned char *) cw)[0]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[10]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[15]]) ^
SboxCUDA[((unsigned char *) cw)[0]] ^
SboxCUDA[((unsigned char *) cw)[5]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[15]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[0]]) ^
SboxCUDA[((unsigned char *) cw)[5]] ^
SboxCUDA[((unsigned char *) cw)[10]]) << 24)
^ rkey[36];
cw[5] = (MUL2(SboxCUDA[((unsigned char *) cw)[4]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[9]]) ^
SboxCUDA[((unsigned char *) cw)[14]] ^
SboxCUDA[((unsigned char *) cw)[3]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[9]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[14]]) ^
SboxCUDA[((unsigned char *) cw)[3]] ^
SboxCUDA[((unsigned char *) cw)[4]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[14]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[3]]) ^
SboxCUDA[((unsigned char *) cw)[4]] ^
SboxCUDA[((unsigned char *) cw)[9]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[3]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[4]]) ^
SboxCUDA[((unsigned char *) cw)[9]] ^
SboxCUDA[((unsigned char *) cw)[14]]) << 24)
^ rkey[37];
cw[6] = (MUL2(SboxCUDA[((unsigned char *) cw)[8]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[13]]) ^
SboxCUDA[((unsigned char *) cw)[2]] ^
SboxCUDA[((unsigned char *) cw)[7]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[13]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[2]]) ^
SboxCUDA[((unsigned char *) cw)[7]] ^
SboxCUDA[((unsigned char *) cw)[8]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[2]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[7]]) ^
SboxCUDA[((unsigned char *) cw)[8]] ^
SboxCUDA[((unsigned char *) cw)[13]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[7]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[8]]) ^
SboxCUDA[((unsigned char *) cw)[13]] ^
SboxCUDA[((unsigned char *) cw)[2]]) << 24)
^ rkey[38];
cw[7] = (MUL2(SboxCUDA[((unsigned char *) cw)[12]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[1]]) ^
SboxCUDA[((unsigned char *) cw)[6]] ^
SboxCUDA[((unsigned char *) cw)[11]]
|
(MUL2(SboxCUDA[((unsigned char *) cw)[1]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[6]]) ^
SboxCUDA[((unsigned char *) cw)[11]] ^
SboxCUDA[((unsigned char *) cw)[12]]) << 8
|
(MUL2(SboxCUDA[((unsigned char *) cw)[6]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[11]]) ^
SboxCUDA[((unsigned char *) cw)[12]] ^
SboxCUDA[((unsigned char *) cw)[1]]) << 16
|
(MUL2(SboxCUDA[((unsigned char *) cw)[11]]) ^
MUL3(SboxCUDA[((unsigned char *) cw)[12]]) ^
SboxCUDA[((unsigned char *) cw)[1]] ^
SboxCUDA[((unsigned char *) cw)[6]]) << 24)
^ rkey[39];
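    // Final round: SubBytes + ShiftRows + AddRoundKey only (no MixColumns). cb[16..31] holds
    // the round-9 state; the substituted bytes land in cb[0..15] (cw[0..3]) and are XORed
    // with the last round key rkey[40..43] as they are stored to ct.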
cb[0] = SboxCUDA[cb[16]];
cb[1] = SboxCUDA[cb[21]];
cb[2] = SboxCUDA[cb[26]];
cb[3] = SboxCUDA[cb[31]];
((int *) ct)[thread_id << 2] = cw[0] ^ rkey[40];
cb[4] = SboxCUDA[cb[20]];
cb[5] = SboxCUDA[cb[25]];
cb[6] = SboxCUDA[cb[30]];
cb[7] = SboxCUDA[cb[19]];
((int *) ct)[thread_id << 2 | 1] = cw[1] ^ rkey[41];
cb[8] = SboxCUDA[cb[24]];
cb[9] = SboxCUDA[cb[29]];
cb[10] = SboxCUDA[cb[18]];
cb[11] = SboxCUDA[cb[23]];
((int *) ct)[thread_id << 2 | 2] = cw[2] ^ rkey[42];
cb[12] = SboxCUDA[cb[28]];
cb[13] = SboxCUDA[cb[17]];
cb[14] = SboxCUDA[cb[22]];
cb[15] = SboxCUDA[cb[27]];
((int *) ct)[thread_id << 2 | 3] = cw[3] ^ rkey[43];
/* if (thread_id == 0) {
printf("state0: 0x%x\n", ((int *) ct)[thread_id << 2]);
printf("state1: 0x%x\n", ((int *) ct)[thread_id << 2|1]);
printf("state2: 0x%x\n", ((int *) ct)[thread_id << 2|2]);
printf("state3: 0x%x\n", ((int *) ct)[thread_id << 2|3]);
}*/
}
void launch_aes_kernel(unsigned char *pt, int *rk, unsigned char *ct, long int size) {
//This function launches the AES kernel.
//Please modify this function for AES kernel.
//In this function, you need to allocate the device memory and so on.
unsigned char *d_ct, *d_pt;
// int *d_pt;
long int size2 = size >> 6;
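    // size2 is the per-stream chunk in bytes: size / Stream (Stream == 64, hence the >> 6).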
cudaStream_t stream[Stream];
dim3 dim_grid(GRIDSIZE >> 6, 1, 1), dim_block(BLOCKSIZE, 1, 1);
cudaHostRegister(pt, size, cudaHostRegisterDefault);
cudaHostRegister(ct, size, cudaHostRegisterDefault);
cudaMalloc((void **) &d_pt, size);
cudaMalloc((void **) &d_ct, size);
cudaMemcpyToSymbol(rkey, rk, 176);
cudaStreamCreateWithFlags(&stream[0], cudaStreamNonBlocking);
cudaMemcpyAsync(d_pt, pt, size2, cudaMemcpyHostToDevice, stream[0]);
// cudaBindTexture(NULL, pt_texture, d_pt);
int i;
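    // Software pipeline: for chunk i, launch the kernel and its device-to-host copy on
    // stream[i], then create stream[i+1] and start the next chunk's host-to-device copy so
    // that transfer overlaps with chunk i's compute.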
for (i = 0; i < Stream; i++) {
device_aes_encrypt <<< dim_grid, dim_block, 0, stream[i] >>> (d_pt + size2 * i, d_ct + size2 * i);
cudaMemcpyAsync(ct + size2 * i, d_ct + size2 * i, size2, cudaMemcpyDeviceToHost, stream[i]);
if (i != Stream - 1) {
cudaStreamCreateWithFlags(&stream[i + 1], cudaStreamNonBlocking);
cudaMemcpyAsync(d_pt + size2 * (i + 1), pt + size2 * (i + 1), size2, cudaMemcpyHostToDevice, stream[i + 1]);
}
}
// cudaUnbindTexture(pt_texture);
int stm;
    for (stm = 0; stm < Stream; stm++) {
        cudaStreamSynchronize(stream[stm]); // ensure the async copies into ct have completed before unregister/free
        cudaStreamDestroy(stream[stm]);
    }
cudaHostUnregister(pt);
cudaHostUnregister(ct);
cudaFree(d_pt);
cudaFree(d_ct);
}
|
5f20fa21d0af5a7f6646c47cb635ee8ebd3a387f.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/user/kernels/slice_util.h"
#include "oneflow/core/kernel/new_kernel_util.h"
namespace oneflow {
namespace {
inline hipblasOperation_t GetCublasOp(char op) {
switch (op) {
case 'n':
case 'N': {
return HIPBLAS_OP_N;
}
case 't':
case 'T': {
return HIPBLAS_OP_T;
}
case 'c':
case 'C': {
return HIPBLAS_OP_C;
}
default: {
UNIMPLEMENTED();
}
}
return HIPBLAS_OP_N;
}
template<typename T>
struct CudaDataTypeTrait;
template<>
struct CudaDataTypeTrait<float> {
const static hipDataType value = HIP_R_32F;
};
template<>
struct CudaDataTypeTrait<half> {
const static hipDataType value = HIP_R_16F;
};
template<typename T>
void CublasBatchGemm(hipblasHandle_t handle, char transa, char transb, int64_t m, int64_t n,
int64_t k, T alpha, const T* a, int64_t lda, int64_t stridea, const T* b,
int64_t ldb, int64_t strideb, T beta, T* c, int64_t ldc, int64_t stridec,
int64_t batch_size) {
hipblasOperation_t opa = GetCublasOp(transa);
hipblasOperation_t opb = GetCublasOp(transb);
if (TORCH_HIP_VERSION >= 9010 && GetCudaSmVersion() >= 500) {
#if TORCH_HIP_VERSION >= 9010
hipDataType data_type = CudaDataTypeTrait<T>::value;
OF_CUBLAS_CHECK(hipblasGemmStridedBatchedEx(
handle, opa, opb, m, n, k, reinterpret_cast<const void*>(&alpha),
reinterpret_cast<const void*>(a), data_type, lda, stridea, reinterpret_cast<const void*>(b),
data_type, ldb, strideb, reinterpret_cast<const void*>(&beta), reinterpret_cast<void*>(c),
data_type, ldc, stridec, batch_size, data_type, HIPBLAS_GEMM_DEFAULT));
#else
UNIMPLEMENTED();
#endif
} else {
cublas_gemmStridedBatched<T>(handle, opa, opb, m, n, k, &alpha, a, ldb, stridea, b, ldb,
strideb, &beta, c, ldc, stridec, batch_size);
}
}
#if TORCH_HIP_VERSION >= 9010
template<>
void CublasBatchGemm<half>(hipblasHandle_t handle, char transa, char transb, int64_t m, int64_t n,
int64_t k, half alpha, const half* a, int64_t lda, int64_t stridea,
const half* b, int64_t ldb, int64_t strideb, half beta, half* c,
int64_t ldc, int64_t stridec, int64_t batch_size) {
using comp_t = float;
hipblasOperation_t opa = GetCublasOp(transa);
hipblasOperation_t opb = GetCublasOp(transb);
if (GetCudaSmVersion() >= 500) {
float alpha_f = static_cast<comp_t>(alpha);
float beta_f = static_cast<comp_t>(beta);
#if TORCH_HIP_VERSION >= 11000
hipblasGemmAlgo_t algo = HIPBLAS_GEMM_DEFAULT;
#else
hipblasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP;
#endif
hipDataType data_type = CudaDataTypeTrait<half>::value;
hipDataType comp_type = CudaDataTypeTrait<comp_t>::value;
OF_CUBLAS_CHECK(hipblasGemmStridedBatchedEx(
handle, opa, opb, m, n, k, &alpha_f, reinterpret_cast<const void*>(a), data_type, lda,
stridea, reinterpret_cast<const void*>(b), data_type, ldb, strideb, &beta_f,
reinterpret_cast<void*>(c), data_type, ldc, stridec, batch_size, comp_type, algo));
} else {
cublas_gemmStridedBatched<half>(handle, opa, opb, m, n, k, &alpha, a, lda, stridea, b, ldb,
strideb, &beta, c, ldc, stridec, batch_size);
}
}
template<>
void CublasBatchGemm<float16>(hipblasHandle_t handle, char transa, char transb, int64_t m, int64_t n,
int64_t k, float16 alpha, const float16* a, int64_t lda,
int64_t stridea, const float16* b, int64_t ldb, int64_t strideb,
float16 beta, float16* c, int64_t ldc, int64_t stridec,
int64_t batch_size) {
CublasBatchGemm<half>(handle, transa, transb, m, n, k, static_cast<half>(alpha),
reinterpret_cast<const half*>(a), lda, stridea,
reinterpret_cast<const half*>(b), ldb, strideb, static_cast<half>(beta),
reinterpret_cast<half*>(c), ldc, stridec, batch_size);
}
#endif // TORCH_HIP_VERSION >= 9010
template<typename T>
void BatchedGemm(DeviceCtx* ctx, char opa, char opb, int64_t m, int64_t n, int64_t k, float alpha,
const T* a, int64_t lda, int64_t stridea, const T* b, int64_t ldb, int64_t strideb,
float beta, T* c, int64_t ldc, int64_t stridec, int64_t batch_size) {
// swap m and n, a and b to convert from row-major to col-major
CublasBatchGemm<T>(ctx->cublas_pmh_handle(), opb, opa, n, m, k, static_cast<T>(alpha), b, ldb,
strideb, a, lda, stridea, static_cast<T>(beta), c, ldc, stridec, batch_size);
}
SliceParams ConstructSliceParams4Value(int64_t seq_len, int64_t batch_size, int64_t num_heads,
int64_t head_size) {
// slice (s, b, n, 3, h) to (s, b, n, 1, h)
SliceParams params;
std::memset(¶ms, 0, sizeof(SliceParams));
params.ndim = 4;
params.dims[0] = seq_len;
params.dims[1] = batch_size;
params.dims[2] = num_heads;
params.dims[3] = 3 * head_size;
params.start[0] = 0;
params.start[1] = 0;
params.start[2] = 0;
params.start[3] = 2 * head_size;
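// the value tensor is the third head_size-wide chunk of the packed (q, k, v) last
// dimension, hence the 2 * head_size start offset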
params.step[0] = 1;
params.step[1] = 1;
params.step[2] = 1;
params.step[3] = 1;
params.size[0] = seq_len;
params.size[1] = batch_size;
params.size[2] = num_heads;
params.size[3] = head_size;
return params;
}
template<typename T>
void TransposeGpu(DeviceCtx* ctx, const ShapeView& in_shape, const ShapeView& out_shape,
const std::vector<int32_t>& perm, const T* in, T* out) {
CHECK_EQ(in_shape.NumAxes(), out_shape.NumAxes());
int32_t num_axes = in_shape.NumAxes();
CHECK_EQ(num_axes, perm.size());
for (int i = 0; i < perm.size(); ++i) { CHECK_EQ(in_shape.At(perm[i]), out_shape.At(i)); }
int64_t elem_cnt = in_shape.elem_cnt();
NewKernelUtil<DeviceType::kGPU>::Transpose(ctx, num_axes, in_shape, out_shape, perm, elem_cnt, in,
out);
}
template<typename T>
class FusedSelfAttentionQueryMulKeyAndValueGpuKernel final : public user_op::OpKernel {
public:
FusedSelfAttentionQueryMulKeyAndValueGpuKernel() = default;
~FusedSelfAttentionQueryMulKeyAndValueGpuKernel() override = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* h_tensor = ctx->Tensor4ArgNameAndIndex("hidden_states", 0);
int64_t seq_len = h_tensor->shape().At(0);
int64_t batch_size = h_tensor->shape().At(1);
int64_t hidden_size = h_tensor->shape().At(2);
int64_t head_size = ctx->Attr<int64_t>("head_size");
int64_t num_heads = hidden_size / (3 * head_size);
int64_t ld = batch_size * hidden_size;
int64_t stride = 3 * head_size;
int64_t k_offset = head_size;
// q * k: (sq, b, n, h) x (sk, b, n, h) => (b, n, sq, h) x (b, n, sk, h)
// => (b, n, sq, h) x (b, n, h, sk) -> (b, n, sq, sk)
float alpha = ctx->Attr<float>("alpha");
user_op::Tensor* qmk_tensor = ctx->Tensor4ArgNameAndIndex("query_mul_key", 0);
const T* q_dptr = h_tensor->dptr<T>();
const T* k_dptr = h_tensor->dptr<T>() + k_offset;
BatchedGemm<T>(ctx->device_ctx(), 'N', 'T', seq_len, seq_len, head_size, alpha, q_dptr, ld,
stride, k_dptr, ld, stride, 0.0f, qmk_tensor->mut_dptr<T>(), seq_len,
seq_len * seq_len, batch_size * num_heads);
// slice v
user_op::Tensor* tmp_v_tensor = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
user_op::Tensor* v_tensor = ctx->Tensor4ArgNameAndIndex("value", 0);
SliceParams params = ConstructSliceParams4Value(seq_len, batch_size, num_heads, head_size);
SliceKernelUtil<DeviceType::kGPU, T>::Forward(ctx->device_ctx(), params, h_tensor->dptr<T>(),
tmp_v_tensor->mut_dptr<T>());
// v from (s, b, n, h) transpose to (b, n, s, h)
Shape value_shape({seq_len, batch_size, num_heads, head_size});
TransposeGpu<T>(ctx->device_ctx(), value_shape, v_tensor->shape(), {1, 2, 0, 3},
tmp_v_tensor->dptr<T>(), v_tensor->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
template<typename T>
class FusedSelfAttentionQueryMulKeyAndValueGradGpuKernel final : public user_op::OpKernel {
public:
FusedSelfAttentionQueryMulKeyAndValueGradGpuKernel() = default;
~FusedSelfAttentionQueryMulKeyAndValueGradGpuKernel() override = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* v_grad_tensor = ctx->Tensor4ArgNameAndIndex("value_grad", 0);
const user_op::Tensor* qmk_grad_tensor = ctx->Tensor4ArgNameAndIndex("query_mul_key_grad", 0);
const user_op::Tensor* h_tensor = ctx->Tensor4ArgNameAndIndex("hidden_states", 0);
user_op::Tensor* tmp_v_tensor = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
user_op::Tensor* h_grad_tensor = ctx->Tensor4ArgNameAndIndex("hidden_states_grad", 0);
float alpha = ctx->Attr<float>("alpha");
int64_t seq_len = h_grad_tensor->shape().At(0);
int64_t batch_size = h_grad_tensor->shape().At(1);
int64_t hidden_size = h_grad_tensor->shape().At(2);
int64_t num_heads = v_grad_tensor->shape().At(1);
int64_t head_size = v_grad_tensor->shape().At(3);
int64_t ld = batch_size * hidden_size;
int64_t stride = 3 * head_size;
CHECK_EQ(hidden_size, num_heads * stride);
// transpose from (b, n, s, h) to (s, b, n, h)
Shape value_shape({seq_len, batch_size, num_heads, head_size});
TransposeGpu<T>(ctx->device_ctx(), v_grad_tensor->shape(), value_shape, {2, 0, 1, 3},
v_grad_tensor->dptr<T>(), tmp_v_tensor->mut_dptr<T>());
// slice v grad
SliceParams params = ConstructSliceParams4Value(seq_len, batch_size, num_heads, head_size);
SliceKernelUtil<DeviceType::kGPU, T>::Backward(
ctx->device_ctx(), params, tmp_v_tensor->dptr<T>(), h_grad_tensor->mut_dptr<T>());
// grad_q = grad_qmk * k
// (b, n, sq, sk) x (b, n, sk, h) -> (b, n, s, h) <= (s, b, n, h) <= (s, b, n, 3, h)
const T* qmk_grad_dptr = qmk_grad_tensor->dptr<T>();
const T* k_dptr = h_tensor->dptr<T>() + head_size;
T* grad_q_dptr = h_grad_tensor->mut_dptr<T>();
BatchedGemm<T>(ctx->device_ctx(), 'N', 'N', seq_len, head_size, seq_len, alpha, qmk_grad_dptr,
seq_len, seq_len * seq_len, k_dptr, ld, stride, 0.0f, grad_q_dptr, ld, stride,
batch_size * num_heads);
// grad_k = grad_qmk * q
// (b, n, sk, sq) x (b, n, sq, h) -> (b, n, sk, h) <= (s, b, n, h) <= (s, b, n, 3, h)
const T* q_dptr = h_tensor->dptr<T>();
T* grad_k_dptr = h_grad_tensor->mut_dptr<T>() + head_size;
BatchedGemm<T>(ctx->device_ctx(), 'T', 'N', seq_len, head_size, seq_len, alpha, qmk_grad_dptr,
seq_len, seq_len * seq_len, q_dptr, ld, stride, 0.0f, grad_k_dptr, ld, stride,
batch_size * num_heads);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
size_t InferTmpBufferSize(user_op::InferContext* ctx) {
const Shape* value_shape = ctx->OutputShape("value", 0);
DataType value_dtype = *ctx->Dtype4ArgNameAndIndex("value", 0);
return value_shape->elem_cnt() * GetSizeOfDataType(value_dtype);
}
size_t InferGradTmpBufferSize(user_op::InferContext* ctx) {
const Shape& value_shape = ctx->InputShape("value_grad", 0);
DataType value_dtype = *ctx->Dtype4ArgNameAndIndex("value_grad", 0);
return value_shape.elem_cnt() * GetSizeOfDataType(value_dtype);
}
} // namespace
#define REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GPU_KERNEL(dtype) \
REGISTER_USER_KERNEL("fused_self_attention_query_mul_key_and_value") \
.SetCreateFn<FusedSelfAttentionQueryMulKeyAndValueGpuKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \
& (user_op::HobDataType("hidden_states", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn(InferTmpBufferSize);
#define REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GRAD_GPU_KERNEL(dtype) \
REGISTER_USER_KERNEL("fused_self_attention_query_mul_key_and_value_grad") \
.SetCreateFn<FusedSelfAttentionQueryMulKeyAndValueGradGpuKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \
& (user_op::HobDataType("hidden_states", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn(InferGradTmpBufferSize);
REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GPU_KERNEL(float)
REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GPU_KERNEL(float16)
REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GRAD_GPU_KERNEL(float)
REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GRAD_GPU_KERNEL(float16)
} // namespace oneflow
| 5f20fa21d0af5a7f6646c47cb635ee8ebd3a387f.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/user/kernels/slice_util.h"
#include "oneflow/core/kernel/new_kernel_util.h"
namespace oneflow {
namespace {
inline cublasOperation_t GetCublasOp(char op) {
switch (op) {
case 'n':
case 'N': {
return CUBLAS_OP_N;
}
case 't':
case 'T': {
return CUBLAS_OP_T;
}
case 'c':
case 'C': {
return CUBLAS_OP_C;
}
default: {
UNIMPLEMENTED();
}
}
return CUBLAS_OP_N;
}
template<typename T>
struct CudaDataTypeTrait;
template<>
struct CudaDataTypeTrait<float> {
const static cudaDataType_t value = CUDA_R_32F;
};
template<>
struct CudaDataTypeTrait<half> {
const static cudaDataType_t value = CUDA_R_16F;
};
template<typename T>
void CublasBatchGemm(cublasHandle_t handle, char transa, char transb, int64_t m, int64_t n,
int64_t k, T alpha, const T* a, int64_t lda, int64_t stridea, const T* b,
int64_t ldb, int64_t strideb, T beta, T* c, int64_t ldc, int64_t stridec,
int64_t batch_size) {
cublasOperation_t opa = GetCublasOp(transa);
cublasOperation_t opb = GetCublasOp(transb);
if (CUDA_VERSION >= 9010 && GetCudaSmVersion() >= 500) {
#if CUDA_VERSION >= 9010
cudaDataType_t data_type = CudaDataTypeTrait<T>::value;
OF_CUBLAS_CHECK(cublasGemmStridedBatchedEx(
handle, opa, opb, m, n, k, reinterpret_cast<const void*>(&alpha),
reinterpret_cast<const void*>(a), data_type, lda, stridea, reinterpret_cast<const void*>(b),
data_type, ldb, strideb, reinterpret_cast<const void*>(&beta), reinterpret_cast<void*>(c),
data_type, ldc, stridec, batch_size, data_type, CUBLAS_GEMM_DEFAULT));
#else
UNIMPLEMENTED();
#endif
} else {
cublas_gemmStridedBatched<T>(handle, opa, opb, m, n, k, &alpha, a, ldb, stridea, b, ldb,
strideb, &beta, c, ldc, stridec, batch_size);
}
}
#if CUDA_VERSION >= 9010
template<>
void CublasBatchGemm<half>(cublasHandle_t handle, char transa, char transb, int64_t m, int64_t n,
int64_t k, half alpha, const half* a, int64_t lda, int64_t stridea,
const half* b, int64_t ldb, int64_t strideb, half beta, half* c,
int64_t ldc, int64_t stridec, int64_t batch_size) {
using comp_t = float;
cublasOperation_t opa = GetCublasOp(transa);
cublasOperation_t opb = GetCublasOp(transb);
if (GetCudaSmVersion() >= 500) {
float alpha_f = static_cast<comp_t>(alpha);
float beta_f = static_cast<comp_t>(beta);
#if CUDA_VERSION >= 11000
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
#else
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP;
#endif
cudaDataType_t data_type = CudaDataTypeTrait<half>::value;
cudaDataType_t comp_type = CudaDataTypeTrait<comp_t>::value;
OF_CUBLAS_CHECK(cublasGemmStridedBatchedEx(
handle, opa, opb, m, n, k, &alpha_f, reinterpret_cast<const void*>(a), data_type, lda,
stridea, reinterpret_cast<const void*>(b), data_type, ldb, strideb, &beta_f,
reinterpret_cast<void*>(c), data_type, ldc, stridec, batch_size, comp_type, algo));
} else {
cublas_gemmStridedBatched<half>(handle, opa, opb, m, n, k, &alpha, a, lda, stridea, b, ldb,
strideb, &beta, c, ldc, stridec, batch_size);
}
}
template<>
void CublasBatchGemm<float16>(cublasHandle_t handle, char transa, char transb, int64_t m, int64_t n,
int64_t k, float16 alpha, const float16* a, int64_t lda,
int64_t stridea, const float16* b, int64_t ldb, int64_t strideb,
float16 beta, float16* c, int64_t ldc, int64_t stridec,
int64_t batch_size) {
CublasBatchGemm<half>(handle, transa, transb, m, n, k, static_cast<half>(alpha),
reinterpret_cast<const half*>(a), lda, stridea,
reinterpret_cast<const half*>(b), ldb, strideb, static_cast<half>(beta),
reinterpret_cast<half*>(c), ldc, stridec, batch_size);
}
#endif // CUDA_VERSION >= 9010
template<typename T>
void BatchedGemm(DeviceCtx* ctx, char opa, char opb, int64_t m, int64_t n, int64_t k, float alpha,
const T* a, int64_t lda, int64_t stridea, const T* b, int64_t ldb, int64_t strideb,
float beta, T* c, int64_t ldc, int64_t stridec, int64_t batch_size) {
// swap m and n, a and b to convert from row-major to col-major
CublasBatchGemm<T>(ctx->cublas_pmh_handle(), opb, opa, n, m, k, static_cast<T>(alpha), b, ldb,
strideb, a, lda, stridea, static_cast<T>(beta), c, ldc, stridec, batch_size);
}
SliceParams ConstructSliceParams4Value(int64_t seq_len, int64_t batch_size, int64_t num_heads,
int64_t head_size) {
// slice (s, b, n, 3, h) to (s, b, n, 1, h)
SliceParams params;
std::memset(¶ms, 0, sizeof(SliceParams));
params.ndim = 4;
params.dims[0] = seq_len;
params.dims[1] = batch_size;
params.dims[2] = num_heads;
params.dims[3] = 3 * head_size;
params.start[0] = 0;
params.start[1] = 0;
params.start[2] = 0;
params.start[3] = 2 * head_size;
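// the value tensor is the third head_size-wide chunk of the packed (q, k, v) last
// dimension, hence the 2 * head_size start offset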
params.step[0] = 1;
params.step[1] = 1;
params.step[2] = 1;
params.step[3] = 1;
params.size[0] = seq_len;
params.size[1] = batch_size;
params.size[2] = num_heads;
params.size[3] = head_size;
return params;
}
template<typename T>
void TransposeGpu(DeviceCtx* ctx, const ShapeView& in_shape, const ShapeView& out_shape,
const std::vector<int32_t>& perm, const T* in, T* out) {
CHECK_EQ(in_shape.NumAxes(), out_shape.NumAxes());
int32_t num_axes = in_shape.NumAxes();
CHECK_EQ(num_axes, perm.size());
for (int i = 0; i < perm.size(); ++i) { CHECK_EQ(in_shape.At(perm[i]), out_shape.At(i)); }
int64_t elem_cnt = in_shape.elem_cnt();
NewKernelUtil<DeviceType::kGPU>::Transpose(ctx, num_axes, in_shape, out_shape, perm, elem_cnt, in,
out);
}
template<typename T>
class FusedSelfAttentionQueryMulKeyAndValueGpuKernel final : public user_op::OpKernel {
public:
FusedSelfAttentionQueryMulKeyAndValueGpuKernel() = default;
~FusedSelfAttentionQueryMulKeyAndValueGpuKernel() override = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* h_tensor = ctx->Tensor4ArgNameAndIndex("hidden_states", 0);
int64_t seq_len = h_tensor->shape().At(0);
int64_t batch_size = h_tensor->shape().At(1);
int64_t hidden_size = h_tensor->shape().At(2);
int64_t head_size = ctx->Attr<int64_t>("head_size");
int64_t num_heads = hidden_size / (3 * head_size);
int64_t ld = batch_size * hidden_size;
int64_t stride = 3 * head_size;
int64_t k_offset = head_size;
// q * k: (sq, b, n, h) x (sk, b, n, h) => (b, n, sq, h) x (b, n, sk, h)
// => (b, n, sq, h) x (b, n, h, sk) -> (b, n, sq, sk)
float alpha = ctx->Attr<float>("alpha");
user_op::Tensor* qmk_tensor = ctx->Tensor4ArgNameAndIndex("query_mul_key", 0);
const T* q_dptr = h_tensor->dptr<T>();
const T* k_dptr = h_tensor->dptr<T>() + k_offset;
BatchedGemm<T>(ctx->device_ctx(), 'N', 'T', seq_len, seq_len, head_size, alpha, q_dptr, ld,
stride, k_dptr, ld, stride, 0.0f, qmk_tensor->mut_dptr<T>(), seq_len,
seq_len * seq_len, batch_size * num_heads);
// slice v
user_op::Tensor* tmp_v_tensor = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
user_op::Tensor* v_tensor = ctx->Tensor4ArgNameAndIndex("value", 0);
SliceParams params = ConstructSliceParams4Value(seq_len, batch_size, num_heads, head_size);
SliceKernelUtil<DeviceType::kGPU, T>::Forward(ctx->device_ctx(), params, h_tensor->dptr<T>(),
tmp_v_tensor->mut_dptr<T>());
// v from (s, b, n, h) transpose to (b, n, s, h)
Shape value_shape({seq_len, batch_size, num_heads, head_size});
TransposeGpu<T>(ctx->device_ctx(), value_shape, v_tensor->shape(), {1, 2, 0, 3},
tmp_v_tensor->dptr<T>(), v_tensor->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
template<typename T>
class FusedSelfAttentionQueryMulKeyAndValueGradGpuKernel final : public user_op::OpKernel {
public:
FusedSelfAttentionQueryMulKeyAndValueGradGpuKernel() = default;
~FusedSelfAttentionQueryMulKeyAndValueGradGpuKernel() override = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* v_grad_tensor = ctx->Tensor4ArgNameAndIndex("value_grad", 0);
const user_op::Tensor* qmk_grad_tensor = ctx->Tensor4ArgNameAndIndex("query_mul_key_grad", 0);
const user_op::Tensor* h_tensor = ctx->Tensor4ArgNameAndIndex("hidden_states", 0);
user_op::Tensor* tmp_v_tensor = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
user_op::Tensor* h_grad_tensor = ctx->Tensor4ArgNameAndIndex("hidden_states_grad", 0);
float alpha = ctx->Attr<float>("alpha");
int64_t seq_len = h_grad_tensor->shape().At(0);
int64_t batch_size = h_grad_tensor->shape().At(1);
int64_t hidden_size = h_grad_tensor->shape().At(2);
int64_t num_heads = v_grad_tensor->shape().At(1);
int64_t head_size = v_grad_tensor->shape().At(3);
int64_t ld = batch_size * hidden_size;
int64_t stride = 3 * head_size;
CHECK_EQ(hidden_size, num_heads * stride);
// transpose from (b, n, s, h) to (s, b, n, h)
Shape value_shape({seq_len, batch_size, num_heads, head_size});
TransposeGpu<T>(ctx->device_ctx(), v_grad_tensor->shape(), value_shape, {2, 0, 1, 3},
v_grad_tensor->dptr<T>(), tmp_v_tensor->mut_dptr<T>());
// slice v grad
SliceParams params = ConstructSliceParams4Value(seq_len, batch_size, num_heads, head_size);
SliceKernelUtil<DeviceType::kGPU, T>::Backward(
ctx->device_ctx(), params, tmp_v_tensor->dptr<T>(), h_grad_tensor->mut_dptr<T>());
// grad_q = grad_qmk * k
// (b, n, sq, sk) x (b, n, sk, h) -> (b, n, s, h) <= (s, b, n, h) <= (s, b, n, 3, h)
const T* qmk_grad_dptr = qmk_grad_tensor->dptr<T>();
const T* k_dptr = h_tensor->dptr<T>() + head_size;
T* grad_q_dptr = h_grad_tensor->mut_dptr<T>();
BatchedGemm<T>(ctx->device_ctx(), 'N', 'N', seq_len, head_size, seq_len, alpha, qmk_grad_dptr,
seq_len, seq_len * seq_len, k_dptr, ld, stride, 0.0f, grad_q_dptr, ld, stride,
batch_size * num_heads);
// grad_k = grad_qmk * q
// (b, n, sk, sq) x (b, n, sq, h) -> (b, n, sk, h) <= (s, b, n, h) <= (s, b, n, 3, h)
const T* q_dptr = h_tensor->dptr<T>();
T* grad_k_dptr = h_grad_tensor->mut_dptr<T>() + head_size;
BatchedGemm<T>(ctx->device_ctx(), 'T', 'N', seq_len, head_size, seq_len, alpha, qmk_grad_dptr,
seq_len, seq_len * seq_len, q_dptr, ld, stride, 0.0f, grad_k_dptr, ld, stride,
batch_size * num_heads);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
size_t InferTmpBufferSize(user_op::InferContext* ctx) {
const Shape* value_shape = ctx->OutputShape("value", 0);
DataType value_dtype = *ctx->Dtype4ArgNameAndIndex("value", 0);
return value_shape->elem_cnt() * GetSizeOfDataType(value_dtype);
}
size_t InferGradTmpBufferSize(user_op::InferContext* ctx) {
const Shape& value_shape = ctx->InputShape("value_grad", 0);
DataType value_dtype = *ctx->Dtype4ArgNameAndIndex("value_grad", 0);
return value_shape.elem_cnt() * GetSizeOfDataType(value_dtype);
}
} // namespace
#define REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GPU_KERNEL(dtype) \
REGISTER_USER_KERNEL("fused_self_attention_query_mul_key_and_value") \
.SetCreateFn<FusedSelfAttentionQueryMulKeyAndValueGpuKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \
& (user_op::HobDataType("hidden_states", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn(InferTmpBufferSize);
#define REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GRAD_GPU_KERNEL(dtype) \
REGISTER_USER_KERNEL("fused_self_attention_query_mul_key_and_value_grad") \
.SetCreateFn<FusedSelfAttentionQueryMulKeyAndValueGradGpuKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \
& (user_op::HobDataType("hidden_states", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn(InferGradTmpBufferSize);
REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GPU_KERNEL(float)
REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GPU_KERNEL(float16)
REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GRAD_GPU_KERNEL(float)
REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GRAD_GPU_KERNEL(float16)
} // namespace oneflow
|
6233be7907be506bbb6152eb909b6e8fec004642.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao ([email protected]) 2018-7-11
*/
#include "../../XDevice.h"
#include "../../XTensor.h"
#include "Sign.h"
#include "Sign.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_ROCM
/*
set each entry to its sign value (CUDA Kernel)
>> a - pointer to input data array
>> b - pointer to output data array
>> size - size of the data array
*/
__global__
void KernelSign(DTYPE * a, DTYPE * b, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
if (a[i] > 0)
b[i] = 1.0F;
else if (a[i] == 0)
b[i] = 0.0F;
else
b[i] = -1.0F;
}
}
/*
set each entry to its sign value with float16 data type value (CUDA Kernel)
This is for float16 computation
>> a - pointer to input data array
>> b - pointer to output data array
>> size - size of the data array
*/
__global__
void KernelSign(__half * a, __half * b, int size)
{
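// float16 path is currently a no-op stub; only the DTYPE kernel above does real work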
return;
}
/*
set each entry to its sign value
>> a - input tensor we are processing
>> b - output tensor we are processing
*/
void _CudaSign(const XTensor * a, XTensor * b)
{
CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((a->isSparse == false), "TODO!");
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(a->devID, devIDBackup);
if (a->dataType == DEFAULT_DTYPE) {
KernelSign << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum);
}
else if (a->dataType == X_FLOAT16) {
KernelSign << <blocks, threads >> >((__half*)a->data, (__half*)b->data, a->unitNum);
}
else {
ShowNTErrors("TODO!");
}
BacktoCudaDev(a->devID, devIDBackup);
}
#endif // USE_ROCM
} // namespace nts(NiuTrans.Tensor)
| 6233be7907be506bbb6152eb909b6e8fec004642.cu | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao ([email protected]) 2018-7-11
*/
#include "../../XDevice.h"
#include "../../XTensor.h"
#include "Sign.h"
#include "Sign.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
set each entry to its sign value (CUDA Kernel)
>> a - pointer to input data array
>> b - pointer to output data array
>> size - size of the data array
*/
__global__
void KernelSign(DTYPE * a, DTYPE * b, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
if (a[i] > 0)
b[i] = 1.0F;
else if (a[i] == 0)
b[i] = 0.0F;
else
b[i] = -1.0F;
}
}
/*
set each entry to its sign value with float16 data type value (CUDA Kernel)
This is for float16 computation
>> a - pointer to input data array
>> b - pointer to output data array
>> size - size of the data array
*/
__global__
void KernelSign(__half * a, __half * b, int size)
{
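// float16 path is currently a no-op stub; only the DTYPE kernel above does real work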
return;
}
/*
set each entry to its sign value
>> a - input tensor we are processing
>> b - output tensor we are processing
*/
void _CudaSign(const XTensor * a, XTensor * b)
{
CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((a->isSparse == false), "TODO!");
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(a->devID, devIDBackup);
if (a->dataType == DEFAULT_DTYPE) {
KernelSign << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum);
}
else if (a->dataType == X_FLOAT16) {
KernelSign << <blocks, threads >> >((__half*)a->data, (__half*)b->data, a->unitNum);
}
else {
ShowNTErrors("TODO!");
}
BacktoCudaDev(a->devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor)
|
67b1f4005e0978aa33d14a4f2dee58f09f11899b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgecsrmv.cu normal z -> c, Wed Sep 17 15:08:43 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 256
#else
#define BLOCK_SIZE 256
#endif
// CSR-SpMV kernel
__global__ void
cgecsrmv_kernel( int num_rows, int num_cols,
magmaFloatComplex alpha,
magmaFloatComplex *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
magmaFloatComplex *d_x,
magmaFloatComplex beta,
magmaFloatComplex *d_y){
int row = blockIdx.x*blockDim.x+threadIdx.x;
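// one thread per matrix row: each thread walks its row's nonzeros and accumulates
// the dot product with x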
int j;
if(row<num_rows){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = d_rowptr[ row ];
int end = d_rowptr[ row+1 ];
for( j=start; j<end; j++)
dot += d_val[ j ] * d_x[ d_colind[j] ];
d_y[ row ] = dot *alpha + beta * d_y[ row ];
}
}
// shifted CSR-SpMV kernel
__global__ void
cgecsrmv_kernel_shift( int num_rows, int num_cols,
magmaFloatComplex alpha,
magmaFloatComplex lambda,
magmaFloatComplex *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
magmaFloatComplex *d_x,
magmaFloatComplex beta,
int offset,
int blocksize,
magma_index_t *add_rows,
magmaFloatComplex *d_y){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = d_rowptr[ row ];
int end = d_rowptr[ row+1 ];
for( j=start; j<end; j++)
dot += d_val[ j ] * d_x[ d_colind[j] ];
if( row<blocksize )
d_y[ row ] = dot * alpha - lambda
* d_x[ offset+row ] + beta * d_y [ row ];
else
d_y[ row ] = dot * alpha - lambda
* d_x[ add_rows[row-blocksize] ] + beta * d_y [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
The input format is CSR (val, row, col).
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
alpha magmaFloatComplex
scalar multiplier
@param
d_val magmaFloatComplex*
array containing values of A in CSR
@param
d_rowptr magma_int_t*
rowpointer of A in CSR
@param
d_colind magma_int_t*
columnindices of A in CSR
@param
d_x magmaFloatComplex*
input vector x
@param
beta magmaFloatComplex
scalar multiplier
@param
d_y magmaFloatComplex*
input/output vector y
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cgecsrmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
magmaFloatComplex *d_x,
magmaFloatComplex beta,
magmaFloatComplex *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( cgecsrmv_kernel), dim3(grid), dim3(BLOCK_SIZE), 0, magma_stream ,
m, n, alpha, d_val, d_rowptr, d_colind, d_x, beta, d_y);
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha * ( A -lambda I ) * x + beta * y on the GPU.
It is a shifted version of the CSR-SpMV.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
alpha magmaFloatComplex
scalar multiplier
@param
lambda magmaFloatComplex
scalar multiplier
@param
d_val magmaFloatComplex*
array containing values of A in CSR
@param
d_rowptr magma_int_t*
rowpointer of A in CSR
@param
d_colind magma_int_t*
columnindices of A in CSR
@param
d_x magmaFloatComplex*
input vector x
@param
beta magmaFloatComplex
scalar multiplier
@param
offset magma_int_t
in case not the main diagonal is scaled
@param
blocksize magma_int_t
in case of processing multiple vectors
@param
add_rows magma_int_t*
in case the matrixpowerskernel is used
@param
d_y magmaFloatComplex*
output vector y
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cgecsrmv_shift( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex lambda,
magmaFloatComplex *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
magmaFloatComplex *d_x,
magmaFloatComplex beta,
int offset,
int blocksize,
magma_index_t *add_rows,
magmaFloatComplex *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( cgecsrmv_kernel_shift), dim3(grid), dim3(BLOCK_SIZE), 0, magma_stream ,
m, n, alpha, lambda, d_val, d_rowptr, d_colind, d_x,
beta, offset, blocksize, add_rows, d_y);
return MAGMA_SUCCESS;
}
| 67b1f4005e0978aa33d14a4f2dee58f09f11899b.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgecsrmv.cu normal z -> c, Wed Sep 17 15:08:43 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 256
#else
#define BLOCK_SIZE 256
#endif
// CSR-SpMV kernel
__global__ void
cgecsrmv_kernel( int num_rows, int num_cols,
magmaFloatComplex alpha,
magmaFloatComplex *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
magmaFloatComplex *d_x,
magmaFloatComplex beta,
magmaFloatComplex *d_y){
int row = blockIdx.x*blockDim.x+threadIdx.x;
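// one thread per matrix row: each thread walks its row's nonzeros and accumulates
// the dot product with x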
int j;
if(row<num_rows){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = d_rowptr[ row ];
int end = d_rowptr[ row+1 ];
for( j=start; j<end; j++)
dot += d_val[ j ] * d_x[ d_colind[j] ];
d_y[ row ] = dot *alpha + beta * d_y[ row ];
}
}
// shifted CSR-SpMV kernel
__global__ void
cgecsrmv_kernel_shift( int num_rows, int num_cols,
magmaFloatComplex alpha,
magmaFloatComplex lambda,
magmaFloatComplex *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
magmaFloatComplex *d_x,
magmaFloatComplex beta,
int offset,
int blocksize,
magma_index_t *add_rows,
magmaFloatComplex *d_y){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = d_rowptr[ row ];
int end = d_rowptr[ row+1 ];
for( j=start; j<end; j++)
dot += d_val[ j ] * d_x[ d_colind[j] ];
if( row<blocksize )
d_y[ row ] = dot * alpha - lambda
* d_x[ offset+row ] + beta * d_y [ row ];
else
d_y[ row ] = dot * alpha - lambda
* d_x[ add_rows[row-blocksize] ] + beta * d_y [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
The input format is CSR (val, row, col).
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
alpha magmaFloatComplex
scalar multiplier
@param
d_val magmaFloatComplex*
array containing values of A in CSR
@param
d_rowptr magma_int_t*
rowpointer of A in CSR
@param
d_colind magma_int_t*
columnindices of A in CSR
@param
d_x magmaFloatComplex*
input vector x
@param
beta magmaFloatComplex
scalar multiplier
@param
d_y magmaFloatComplex*
input/output vector y
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cgecsrmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
magmaFloatComplex *d_x,
magmaFloatComplex beta,
magmaFloatComplex *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
cgecsrmv_kernel<<< grid, BLOCK_SIZE, 0, magma_stream >>>
(m, n, alpha, d_val, d_rowptr, d_colind, d_x, beta, d_y);
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha * ( A -lambda I ) * x + beta * y on the GPU.
It is a shifted version of the CSR-SpMV.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
alpha magmaFloatComplex
scalar multiplier
@param
lambda magmaFloatComplex
scalar multiplier
@param
d_val magmaFloatComplex*
array containing values of A in CSR
@param
d_rowptr magma_int_t*
rowpointer of A in CSR
@param
d_colind magma_int_t*
columnindices of A in CSR
@param
d_x magmaFloatComplex*
input vector x
@param
beta magmaFloatComplex
scalar multiplier
@param
offset magma_int_t
in case not the main diagonal is scaled
@param
blocksize magma_int_t
in case of processing multiple vectors
@param
add_rows magma_int_t*
in case the matrixpowerskernel is used
@param
d_y magmaFloatComplex*
output vector y
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cgecsrmv_shift( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex lambda,
magmaFloatComplex *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
magmaFloatComplex *d_x,
magmaFloatComplex beta,
int offset,
int blocksize,
magma_index_t *add_rows,
magmaFloatComplex *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
cgecsrmv_kernel_shift<<< grid, BLOCK_SIZE, 0, magma_stream >>>
(m, n, alpha, lambda, d_val, d_rowptr, d_colind, d_x,
beta, offset, blocksize, add_rows, d_y);
return MAGMA_SUCCESS;
}
|
d15c9585b313bc1aabc0a37fecbcfd934d4a309b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <include/labwork.h>
#include <hip/hip_runtime_api.h>
#include <omp.h>
#define ACTIVE_THREADS 4
int main(int argc, char **argv) {
printf("USTH ICT Master 2018, Advanced Programming for HPC.\n");
if (argc < 2) {
printf("Usage: labwork <lwNum> <inputImage>\n");
printf(" lwNum labwork number\n");
printf(" inputImage the input file name, in JPEG format\n");
return 0;
}
int lwNum = atoi(argv[1]);
std::string inputFilename;
// pre-initialize CUDA to avoid incorrect profiling
printf("Warming up...\n");
char *temp;
hipMalloc(&temp, 1024);
Labwork labwork;
if (lwNum != 2 ) {
inputFilename = std::string(argv[2]);
labwork.loadInputImage(inputFilename);
}
printf("Starting labwork %d\n", lwNum);
Timer timer;
timer.start();
switch (lwNum) {
case 1:
labwork.labwork1_CPU();
labwork.saveOutputImage("labwork2-cpu-out.jpg");
printf("labwork 1 CPU ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
timer.start();
labwork.labwork1_OpenMP();
printf("labwork 1 OpenMP ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork2-openmp-out.jpg");
break;
case 2:
labwork.labwork2_GPU();
break;
case 3:
labwork.labwork3_GPU();
labwork.saveOutputImage("labwork3-gpu-out.jpg");
break;
case 4:
labwork.labwork4_GPU();
labwork.saveOutputImage("labwork4-gpu-out.jpg");
break;
case 5:
labwork.labwork5_CPU();
printf("labwork 5 CPU ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork5-cpu-out.jpg");
timer.start();
labwork.labwork5_GPU();
printf("labwork 5 GPU ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork5-gpu-out.jpg");
break;
case 6:
labwork.labwork6_GPU();
labwork.saveOutputImage("labwork6-gpu-out.jpg");
break;
case 7:
labwork.labwork7_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork7-gpu-out.jpg");
break;
case 8:
labwork.labwork8_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork8-gpu-out.jpg");
break;
case 9:
labwork.labwork9_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork9-gpu-out.jpg");
break;
case 10:
labwork.labwork10_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork10-gpu-out.jpg");
break;
}
printf("labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
}
void Labwork::loadInputImage(std::string inputFileName) {
inputImage = jpegLoader.load(inputFileName);
}
void Labwork::saveOutputImage(std::string outputFileName) {
jpegLoader.save(outputFileName, outputImage, inputImage->width, inputImage->height, 90);
}
void Labwork::labwork1_CPU() {
int pixelCount = inputImage->width * inputImage->height;
outputImage = static_cast<char *>(malloc(pixelCount * 3));
for (int j = 0; j < 100; j++) { // let's do it 100 times, otherwise it's too fast!
for (int i = 0; i < pixelCount; i++) {
outputImage[i * 3] = (char) (((int) inputImage->buffer[i * 3] + (int) inputImage->buffer[i * 3 + 1] +
(int) inputImage->buffer[i * 3 + 2]) / 3);
outputImage[i * 3 + 1] = outputImage[i * 3];
outputImage[i * 3 + 2] = outputImage[i * 3];
}
}
}
void Labwork::labwork1_OpenMP() {
int pixelCount = inputImage->width * inputImage->height;
outputImage = static_cast<char *>(malloc(pixelCount * 3));
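// parallelize the 100 repetitions across host threads; each thread converts the
// whole image to grayscale independently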
#pragma omp parallel for schedule(dynamic)
for (int j = 0; j < 100; j++) { // let's do it 100 times, otherwise it's too fast!
for (int i = 0; i < pixelCount; i++) {
outputImage[i * 3] = (char) (((int) inputImage->buffer[i * 3] + (int) inputImage->buffer[i * 3 + 1] +
(int) inputImage->buffer[i * 3 + 2]) / 3);
outputImage[i * 3 + 1] = outputImage[i * 3];
outputImage[i * 3 + 2] = outputImage[i * 3];
}
}
}
int getSPcores(hipDeviceProp_t devProp) {
int cores = 0;
int mp = devProp.multiProcessorCount;
switch (devProp.major) {
case 2: // Fermi
if (devProp.minor == 1) cores = mp * 48;
else cores = mp * 32;
break;
case 3: // Kepler
cores = mp * 192;
break;
case 5: // Maxwell
cores = mp * 128;
break;
case 6: // Pascal
if (devProp.minor == 1) cores = mp * 128;
else if (devProp.minor == 0) cores = mp * 64;
else printf("Unknown device type\n");
break;
default:
printf("Unknown device type\n");
break;
}
return cores;
}
void Labwork::labwork2_GPU() {
int nDevices = 0;
// get all devices
hipGetDeviceCount(&nDevices);
printf("Number total of GPU : %d\n\n", nDevices);
for (int i = 0; i < nDevices; i++){
// get informations from individual device
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
// something more here
printf("GPU #%d\n", i);
printf("GPU name: %s\n", prop.name);
printf("Clock rate: %d\n", prop.clockRate);
printf("Number of cores: %d\n", getSPcores(prop));
printf("Number of multiprocessors: %d\n", prop.multiProcessorCount);
printf("Warp Size: %d\n", prop.warpSize);
printf("Memory Clock Rate: %d\n", prop.memoryClockRate);
printf("Memory Bus Width (bits): %d\nDevices", prop.memoryBusWidth);
printf("Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
// Write a grayscale kernel
__global__ void grayscale(uchar3 *input, uchar3 *output) {
// this will execute in a device core
int tid = threadIdx.x + blockIdx.x * blockDim.x;
output[tid].x = (input[tid].x + input[tid].y +input[tid].z) / 3;
output[tid].z = output[tid].y = output[tid].x;
}
void Labwork::labwork3_GPU() {
// Calculate number of pixels
int pixelCount = inputImage->width * inputImage->height;
// Allocate CUDA memory
uchar3 *devInput;
uchar3 *devOutput;
hipMalloc(&devInput, pixelCount *sizeof(uchar3));
hipMalloc(&devOutput, pixelCount *sizeof(uchar3));
// Copy InputImage from CPU (host) to GPU (device)
hipMemcpy(devInput, inputImage->buffer, pixelCount * sizeof(uchar3),hipMemcpyHostToDevice);
// Processing : launch the kernel
int blockSize = 1024;
int numBlock = pixelCount / blockSize;
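// note: integer division truncates, so if pixelCount is not a multiple of blockSize
// the last few pixels are never processed (the 2D version in labwork 4 rounds up instead)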
hipLaunchKernelGGL(( grayscale), dim3(numBlock), dim3(blockSize), 0, 0, devInput, devOutput);
// Copy CUDA Memory from GPU to CPU
// allocate memory for the output on the host
outputImage = static_cast<char *>(malloc(pixelCount * sizeof(uchar3)));
hipMemcpy(outputImage, devOutput, pixelCount * sizeof(uchar3),hipMemcpyDeviceToHost);
// Cleaning
hipFree(devInput);
hipFree(devOutput);
}
__global__ void grayscaleVer2D(uchar3* input, uchar3* output, int imageWidth, int imageHeight){
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
if(tidx >= imageWidth || tidy >= imageHeight) return;
int tid = tidx + tidy * imageWidth;
output[tid].x = (input[tid].x + input[tid].y + input[tid].z) / 3;
output[tid].z = output[tid].y = output[tid].x;
}
void Labwork::labwork4_GPU(){
// Calculate number of pixels
int pixelCount = inputImage->width * inputImage->height;
// // Allocate CUDA memory
uchar3 *devInput;
uchar3 *devOutput;
hipMalloc(&devInput, pixelCount *sizeof(uchar3));
hipMalloc(&devOutput, pixelCount *sizeof(uchar3));
// // Copy InputImage from CPU (host) to GPU (device)
hipMemcpy(devInput, inputImage->buffer, pixelCount * sizeof(uchar3),hipMemcpyHostToDevice);
// // Processing : launch the kernel
// // int blockSize = 1024;
// // int numBlock = pixelCount / blockSize;
// //hipLaunchKernelGGL(( grayscale), dim3(numBlock), dim3(blockSize), 0, 0, devInput, devOutput);
dim3 blockSize = dim3(32, 32);
// //dim3 gridSize = dim3(8, 8);
dim3 gridSize = dim3((inputImage->width + blockSize.x -1) / blockSize.x, (inputImage->height + blockSize.y -1) / blockSize.y);
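// grid size is rounded up so every pixel gets a thread; the kernel bounds-checks
// tidx/tidy against the image dimensions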
hipLaunchKernelGGL(( grayscaleVer2D), dim3(gridSize), dim3(blockSize), 0, 0, devInput, devOutput, inputImage->width, inputImage->height);
// // Copy CUDA Memory from GPU to CPU
// // allocate memory for the output on the host
outputImage = static_cast<char *>(malloc(pixelCount * sizeof(uchar3)));
hipMemcpy(outputImage, devOutput, pixelCount * sizeof(uchar3),hipMemcpyDeviceToHost);
// // Cleaning
hipFree(devInput);
hipFree(devOutput);
}
void Labwork::labwork5_CPU() {
int kernel[] = {0, 0, 1, 2, 1, 0, 0,
0, 3, 13, 22, 13, 3, 0,
1, 13, 59, 97, 59, 13, 1,
2, 22, 97, 159, 97, 22, 2,
1, 13, 59, 97, 59, 13, 1,
0, 3, 13, 22, 13, 3, 0,
0, 0, 1, 2, 1, 0, 0};
// Calculate number of pixels
int pixelCount = inputImage->width * inputImage->height;
outputImage = static_cast<char *>(malloc(pixelCount * sizeof(uchar3)));
for(int rows = 0; rows < inputImage->height; rows++) {
for (int columns = 0; columns < inputImage->width; columns++){
int sum = 0; // sum is for normalization
int constant = 0;
for(int y=-3; y <= 3; y++){
for(int x=-3; x <= 3; x++){
int tempx = columns + x;
int tempy = rows + y;
if( tempx < 0 || tempx >= inputImage->width || tempy < 0 || tempy >= inputImage->height) continue;
int tid = tempx + tempy*inputImage->width;
char pixelValue = (char) (((int) inputImage->buffer[tid * 3] + (int) inputImage->buffer[tid * 3 + 1] +
(int) inputImage->buffer[tid * 3 + 2]) / 3);
int coefficient = kernel[(y+3)*7+x+3];
sum += pixelValue*coefficient;
constant += coefficient;
}
}
sum /= constant;
int positionOut = rows*inputImage->width + columns;
if(positionOut < pixelCount){
outputImage[positionOut * 3] = outputImage[positionOut * 3 + 1] = outputImage[positionOut * 3 + 2] = sum;
}
}
}
}
// write a blur kernel for shared memory
__global__ void blur(uchar3* input, uchar3* output, int* kernel, int imageWidth, int imageHeight){
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
if(tidx >= imageWidth || tidy >= imageHeight) return;
int tid = tidx + tidy * imageWidth;
__shared__ int sKernel[49];
int localtid = threadIdx.x + threadIdx.y * blockDim.x;
if (localtid < 49){
sKernel[localtid] = kernel[localtid];
}
__syncthreads();
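// barrier: make sure the whole 7x7 convolution kernel is staged in shared memory
// before any thread starts reading it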
int sum = 0; // sum is for normalization
int constant = 0;
for(int y=-3; y <= 3; y++){
for(int x=-3; x <= 3; x++){
int rows = tidx + x;
int columns = tidy + y;
if( rows < 0 || rows >= imageWidth || columns < 0 || columns >= imageHeight) continue;
int tid = rows + columns * imageWidth;
unsigned char pixelValue = (input[tid].x + input[tid].y +input[tid].z) / 3;
int coefficient = sKernel[(y+3)*7+x+3];
sum += pixelValue*coefficient;
constant += coefficient;
}
}
sum /= constant;
// int positionOut = y*inputImage->width + x;
// if(positionOut < pixelCount){
// outputImage[positionOut * 3] = outputImage[positionOut * 3 + 1] = outputImage[positionOut * 3 + 2] = sum;
// }
output[tid].z = output[tid].y = output[tid].x = sum;
}
void Labwork::labwork5_GPU() {
int kernel[] = {0,0,1,2,1,0,0,
0,3,13,22,13,3,0,
1,13,59,97,59,13,1,
2,22,97,159,97,22,2,
1,13,59,97,59,13,1,
0,3,13,22,13,3,0,
0,0,1,2,1,0,0};
int *share;
// Calculate number of pixels
int pixelCount = inputImage->width * inputImage->height;
dim3 blockSize = dim3(32, 32);
//dim3 gridSize = dim3(8, 8);
dim3 gridSize = dim3((inputImage->width + blockSize.x -1) / blockSize.x, (inputImage->height + blockSize.y -1) / blockSize.y);
// Allocate CUDA memory
uchar3 *devInput;
uchar3 *devOutput;
hipMalloc(&devInput, pixelCount *sizeof(uchar3));
hipMalloc(&devOutput, pixelCount *sizeof(uchar3));
hipMalloc(&share, sizeof(kernel));
// allocate memory for the output on the host
outputImage = static_cast<char *>(malloc(pixelCount * sizeof(uchar3)));
// Copy InputImage from CPU (host) to GPU (device)
hipMemcpy(devInput, inputImage->buffer, pixelCount * sizeof(uchar3),hipMemcpyHostToDevice);
// Copy Kernel into shared memory
hipMemcpy(share, kernel, sizeof(kernel), hipMemcpyHostToDevice);
// Processing : launch the kernel
// int blockSize = 1024;
// int numBlock = pixelCount / blockSize;
hipLaunchKernelGGL(( blur), dim3(gridSize), dim3(blockSize), 0, 0, devInput, devOutput, share, inputImage->width, inputImage->height);
// Copy CUDA Memory from GPU to CPU
hipMemcpy(outputImage, devOutput, pixelCount * sizeof(uchar3),hipMemcpyDeviceToHost);
// // Cleaning
hipFree(devInput);
hipFree(devOutput);
hipFree(share);
}
__global__ void binarization(uchar3* input, uchar3* output, int imageWidth, int imageHeight, int thresholdValue){
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
if(tidx >= imageWidth || tidy >= imageHeight) return;
int tid = tidx + tidy * imageWidth;
unsigned char binary = (input[tid].x + input[tid].y + input[tid].z) / 3;
if (binary > thresholdValue){
binary = 255;
} else {
binary = 0;
}
output[tid].z = output[tid].y = output[tid].x = binary;
}
__global__ void brightness(uchar3* input, uchar3* output, int imageWidth, int imageHeight, int brightnessValue){
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
if(tidx >= imageWidth || tidy >= imageHeight) return;
int tid = tidx + tidy * imageWidth;
unsigned char binary = (input[tid].x + input[tid].y + input[tid].z) / 3;
// unsigned char increase = binary + brightnessValue;
// if (increase > 255){
// increase = 255;
// } else {
// increase = 0;
// }
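// note: unsigned char arithmetic wraps modulo 256, so a result above 255 wraps around
// instead of saturating (the commented-out clamp above was one way to handle this)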
binary += brightnessValue;
output[tid].z = output[tid].y = output[tid].x = binary;
}
__global__ void blending(uchar3* input0, uchar3* input1, uchar3* output, int imageWidth, int imageHeight, float weightValue){
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
if(tidx >= imageWidth || tidy >= imageHeight) return;
int tid = tidx + tidy * imageWidth;
// unsigned char binary = (input0[tid].x + input0[tid].y + input0[tid].z) / 3;
// unsigned char binary2 = (input1[tid].x + input1[tid].y + input1[tid].z) / 3;
// binary = weightValue*binary + (1-weightValue)*binary2;
float binary = (input0[tid].x + input0[tid].y + input0[tid].z) / 3;
float binary1 = (input1[tid].x + input1[tid].y + input1[tid].z) / 3;
float totalbinary = (binary * weightValue) + binary1 * (1 - weightValue);
output[tid].z = output[tid].y = output[tid].x = totalbinary;
}
void Labwork::labwork6_GPU() {
/*6A - BINARIZATION
int threshold;
printf("Enter the threshold value: ");
scanf("%d", &threshold);
*/
/* 6B - BRIGHTNESS CONTROLL
int bright;
printf("Enter the threshold value: ");
scanf("%d", &bright);
*/
/* 6C - BLENDING
char buffer[3];
printf("Enter the weight: ", buffer);
scanf("%s", buffer);
int weightValue = atoi(buffer);
*/
// Calculate number of pixels
int pixelCount = inputImage->width * inputImage->height;
// // Allocate CUDA memory
uchar3 *devInput;
uchar3 *devOutput;
hipMalloc(&devInput, pixelCount *sizeof(uchar3));
hipMalloc(&devOutput, pixelCount *sizeof(uchar3));
// // Copy InputImage from CPU (host) to GPU (device)
hipMemcpy(devInput, inputImage->buffer, pixelCount * sizeof(uchar3),hipMemcpyHostToDevice);
// // Processing : launch the kernel
// // int blockSize = 1024;
// // int numBlock = pixelCount / blockSize;
// //hipLaunchKernelGGL(( grayscale), dim3(numBlock), dim3(blockSize), 0, 0, devInput, devOutput);
dim3 blockSize = dim3(32, 32);
// //dim3 gridSize = dim3(8, 8);
dim3 gridSize = dim3((inputImage->width + blockSize.x -1) / blockSize.x, (inputImage->height + blockSize.y -1) / blockSize.y);
// 6A - BINARIZATION
// binarization<<<gridSize, blockSize>>>(devInput, devOutput, inputImage->width, inputImage->height, threshold);
// 6B - BRIGHTNESS CONTROLL
//brightness<<<gridSize, blockSize>>>(devInput, devOutput, inputImage->width, inputImage->height, bright);
// 6C - BLENDING
//blending<<<gridSize, blockSize>>>(devInput, devInput, devOutput, inputImage->width, inputImage->height, weightValue);
// // Copy CUDA Memory from GPU to CPU
// // allocate memory for the output on the host
outputImage = static_cast<char *>(malloc(pixelCount * sizeof(uchar3)));
hipMemcpy(outputImage, devOutput, pixelCount * sizeof(uchar3),hipMemcpyDeviceToHost);
// // Cleaning
hipFree(devInput);
hipFree(devOutput);
}
// __global__ void reduce(uchar3* input, uchar3* output, int imageWidth, int imageHeight){
// // dynamic shared memory size, allocated in host
// __shared__ int cache[];
// // cache the block content
// unsigned int localtid = threadIdx.x;
// unsigned int tidx = threadIdx.x + blockIdx.x * blockDim.x;
// unsigned int tidy = threadIdx.y + blockIdx.y * blockDim.y;
// // if(tidx >= imageWidth || tidy >= imageHeight) return;
// int tid = tidx + tidy * imageWidth;
// cache[localtid] = input[tid].x;
// __syncthreads();
// // reduction in cache
// for(int s = 1; s < blockDim.x; s *= 2) {
// if(localtid % (s * 2) == 0) {
// int index = s * 2 * localtid;
// if(index < blockDim.x) {
// cache[tid] += cache[tid + s];
// }
// __syncthreads();
// }
// // only first thread writes back
// if(local == 0) out[blockIdx.x] = cache[0];
// }
void Labwork::labwork7_GPU() {
}
struct hsv {
float *h, *s, *v;
};
__global__ void RGB2HSV(uchar3* input, hsv output, int imageWidth, int imageHeight){
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
if(tidx >= imageWidth || tidy >= imageHeight) return;
int tid = tidx + tidy * imageWidth;
float r = (float)input[tid].x/255.0;
float g = (float)input[tid].y/255.0;
float b = (float)input[tid].z/255.0;
float Max = max(r, max(g,b));
float Min = min(r, min(g,b));
float delta = Max - Min;
float h = 0;
float s = 0;
float v = 0;
if (Max != 0){
s = delta/Max;
if (Max == r) h = 60 * fmodf(((g-b)/delta),6.0);
if (Max == g) h = 60 * ((b-r)/delta+2);
if (Max == b) h = 60 * ((r-g)/delta+4);
}
if (Max == 0) s = 0;
if (delta == 0) h = 0;
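// note: when Max == r and g < b, fmodf keeps the sign and h comes out negative;
// the branches in HSV2RGB below only cover [0, 360), so such pixels fall through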
v = Max;
output.h[tid] = h;
output.s[tid] = s;
output.v[tid] = v;
}
__global__ void HSV2RGB(hsv input, uchar3* output, int imageWidth, int imageHeight){
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
if(tidx >= imageWidth || tidy >= imageHeight) return;
int tid = tidx + tidy * imageWidth;
float h = input.h[tid];
float s = input.s[tid];
float v = input.v[tid];
float d = h/60;
float hi = (int)d % 6;
float f = d - hi;
float l = v * (1-s);
float m = v * (1-f*s);
float n = v * (1-(1-f)*s);
float r,g,b;
if (h >= 0 && h < 60){
r = v;
g = n;
b = l;
}
if (h >= 60 && h < 120){
r = m;
g = v;
b = l;
}
if (h >= 120 && h < 180){
r = l;
g = v;
b = n;
}
if (h >= 180 && h < 240){
r = l;
g = m;
b = v;
}
if (h >= 240 && h < 300){
r = n;
g = l;
b = v;
}
if (h >= 300 && h < 360){
r = v;
g = l;
b = m;
}
output[tid].x = r*255;
output[tid].y = g*255;
output[tid].z = b*255;
}
void Labwork::labwork8_GPU() {
// Calculate number of pixels
int pixelCount = inputImage->width * inputImage->height;
// // Allocate CUDA memory
uchar3 *devInput;
uchar3 *devOutput;
hsv devHSV;
hipMalloc((void**)&devHSV.h, pixelCount *sizeof(float));
hipMalloc((void**)&devHSV.s, pixelCount *sizeof(float));
hipMalloc((void**)&devHSV.v, pixelCount *sizeof(float));
hipMalloc(&devInput, pixelCount *sizeof(uchar3));
hipMalloc(&devOutput, pixelCount *sizeof(uchar3));
// // Copy InputImage from CPU (host) to GPU (device)
hipMemcpy(devInput, inputImage->buffer, pixelCount * sizeof(uchar3),hipMemcpyHostToDevice);
// Processing : launch the kernel
// int blockSize = 1024;
// int numBlock = pixelCount / blockSize;
//hipLaunchKernelGGL(( grayscale), dim3(numBlock), dim3(blockSize), 0, 0, devInput, devOutput);
dim3 blockSize = dim3(32, 32);
// //dim3 gridSize = dim3(8, 8);
dim3 gridSize = dim3((inputImage->width + blockSize.x -1) / blockSize.x, (inputImage->height + blockSize.y -1) / blockSize.y);
//hipLaunchKernelGGL(( grayscaleVer2D), dim3(gridSize), dim3(blockSize), 0, 0, devInput, devOutput, inputImage->width, inputImage->height);
hipLaunchKernelGGL(( RGB2HSV), dim3(gridSize), dim3(blockSize), 0, 0, devInput, devHSV, inputImage->width, inputImage->height);
hipLaunchKernelGGL(( HSV2RGB), dim3(gridSize), dim3(blockSize), 0, 0, devHSV, devOutput, inputImage->width, inputImage->height);
// Copy CUDA Memory from GPU to CPU
// allocate memory for the output on the host
outputImage = static_cast<char *>(malloc(pixelCount * sizeof(uchar3)));
hipMemcpy(outputImage, devOutput, pixelCount * sizeof(uchar3),hipMemcpyDeviceToHost);
// Cleaning
hipFree(devInput);
hipFree(devOutput);
hipFree(devHSV.h);
hipFree(devHSV.s);
hipFree(devHSV.v);
}
void Labwork::labwork9_GPU() {
}
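// Kuwahara filter: the window around each pixel is split into four overlapping quadrants
// (top-left, top-right, bottom-left, bottom-right). For every quadrant the kernel accumulates a
// mean colour and a spread measure of the brightest channel, then writes out the mean colour of
// the quadrant with the smallest spread, which smooths regions while preserving edges.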
__global__ void kuwahara(uchar3* input, uchar3* output, int imageWidth, int imageHeight, int windowSize){
unsigned int tidx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int tidy = threadIdx.y + blockIdx.y * blockDim.y;
if(tidx >= imageWidth || tidy >= imageHeight) return;
int tid = tidx + tidy * imageWidth;
double window[4] = {0.0};
double SD[4] = {0.0};
int meanRGB[4][3] = {0};
int pxCount[4] = {0};
int windowPosition;
for(int x = 1 - windowSize; x <= windowSize - 1; x++){
for(int y = 1 - windowSize; y <= windowSize - 1; y++){
int rows = tidx + x;
int columns = tidy + y;
if( rows < 0 || rows >= imageWidth || columns < 0 || columns >= imageHeight) continue;
int positionOut = rows + columns * imageWidth;
int red = input[positionOut].x;
int green = input[positionOut].y;
int blue = input[positionOut].z;
if (x <= 0 && y >= 0){
windowPosition = 0; // top left
}
if (x >= 0 && y >= 0){
windowPosition = 1; //top right
}
if (x <= 0 && y <= 0){
windowPosition = 2; // bottom left
}
if (x >= 0 && y <= 0){
windowPosition = 3; // bottom right
}
meanRGB[windowPosition][0] += red;
meanRGB[windowPosition][1] += green;
meanRGB[windowPosition][2] += blue;
window[windowPosition] += max(red, max(green,blue));
pxCount[windowPosition]++;
SD[windowPosition] += pow((max(red, max(green,blue)) - window[windowPosition]),2.0);
}
}
for (int i = 0; i < 4; i++){
SD[i] = sqrt(SD[i] / (pxCount[i]));
window[i] /= pxCount[i];
for(int j = 0; j < 3; j++){
meanRGB[i][j] /= pxCount[i];
}
}
double minSD = min(SD[0], min( SD[1], min(SD[2], SD[3])));
if (minSD == SD[0]) tidx=0;
else if (minSD == SD[1]) tidx=1;
else if (minSD == SD[2]) tidx=2;
else tidx=3;
output[tid].x = meanRGB[tidx][0];
output[tid].y = meanRGB[tidx][1];
output[tid].z = meanRGB[tidx][2];
}
void Labwork::labwork10_GPU(){
// Calculate number of pixels
int pixelCount = inputImage->width * inputImage->height;
int windowSize = 32;
// Allocate CUDA memory
uchar3 *devInput;
uchar3 *devOutput;
hipMalloc(&devInput, pixelCount *sizeof(uchar3));
hipMalloc(&devOutput, pixelCount *sizeof(uchar3));
// Copy InputImage from CPU (host) to GPU (device)
hipMemcpy(devInput, inputImage->buffer, pixelCount * sizeof(uchar3),hipMemcpyHostToDevice);
// Processing : launch the kernel
// int blockSize = 1024;
// int numBlock = pixelCount / blockSize;
//hipLaunchKernelGGL(( grayscale), dim3(numBlock), dim3(blockSize), 0, 0, devInput, devOutput);
dim3 blockSize = dim3(32, 32);
//dim3 gridSize = dim3(8, 8);
dim3 gridSize = dim3((inputImage->width + blockSize.x -1) / blockSize.x, (inputImage->height + blockSize.y -1) / blockSize.y);
hipLaunchKernelGGL(( kuwahara), dim3(gridSize), dim3(blockSize), 0, 0, devInput, devOutput, inputImage->width, inputImage->height, windowSize);
// Copy CUDA Memory from GPU to CPU
// allocate memory for the output on the host
outputImage = static_cast<char *>(malloc(pixelCount * sizeof(uchar3)));
hipMemcpy(outputImage, devOutput, pixelCount * sizeof(uchar3),hipMemcpyDeviceToHost);
// Cleaning
hipFree(devInput);
hipFree(devOutput);
}
// Sobel edge detector on the grayscale buffer (one byte per pixel); the gradient magnitude is written to every channel of output (input is unused here).
__global__ void sobelGPU(uchar3 *input, uchar3 *output, unsigned char *buffer, int height, int width){
    int tidx = threadIdx.x + blockIdx.x * blockDim.x;
    int tidy = threadIdx.y + blockIdx.y * blockDim.y;
    if (tidx < 1 || tidy < 1 || tidx >= width - 1 || tidy >= height - 1) return;
    int Gx = -buffer[width*(tidy-1)+(tidx-1)] + buffer[width*(tidy-1)+(tidx+1)] - 2*buffer[width*tidy+(tidx-1)]
             + 2*buffer[width*tidy+(tidx+1)] - buffer[width*(tidy+1)+(tidx-1)] + buffer[width*(tidy+1)+(tidx+1)];
    int Gy = -buffer[width*(tidy-1)+(tidx-1)] - 2*buffer[width*(tidy-1)+tidx] - buffer[width*(tidy-1)+(tidx+1)]
             + buffer[width*(tidy+1)+(tidx-1)] + 2*buffer[width*(tidy+1)+tidx] + buffer[width*(tidy+1)+(tidx+1)];
    int magnitude = min(255, (int) sqrtf((float)(Gx * Gx + Gy * Gy)));
    int tid = tidx + tidy * width;
    output[tid].x = output[tid].y = output[tid].z = (unsigned char) magnitude;
}
| d15c9585b313bc1aabc0a37fecbcfd934d4a309b.cu | #include <stdio.h>
#include <include/labwork.h>
#include <cuda_runtime_api.h>
#include <omp.h>
#define ACTIVE_THREADS 4
int main(int argc, char **argv) {
printf("USTH ICT Master 2018, Advanced Programming for HPC.\n");
if (argc < 2) {
printf("Usage: labwork <lwNum> <inputImage>\n");
printf(" lwNum labwork number\n");
printf(" inputImage the input file name, in JPEG format\n");
return 0;
}
int lwNum = atoi(argv[1]);
std::string inputFilename;
// pre-initialize CUDA to avoid incorrect profiling
printf("Warming up...\n");
char *temp;
cudaMalloc(&temp, 1024);
Labwork labwork;
if (lwNum != 2 ) {
inputFilename = std::string(argv[2]);
labwork.loadInputImage(inputFilename);
}
printf("Starting labwork %d\n", lwNum);
Timer timer;
timer.start();
switch (lwNum) {
case 1:
labwork.labwork1_CPU();
labwork.saveOutputImage("labwork2-cpu-out.jpg");
printf("labwork 1 CPU ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
timer.start();
labwork.labwork1_OpenMP();
printf("labwork 1 OpenMP ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork2-openmp-out.jpg");
break;
case 2:
labwork.labwork2_GPU();
break;
case 3:
labwork.labwork3_GPU();
labwork.saveOutputImage("labwork3-gpu-out.jpg");
break;
case 4:
labwork.labwork4_GPU();
labwork.saveOutputImage("labwork4-gpu-out.jpg");
break;
case 5:
labwork.labwork5_CPU();
printf("labwork 5 CPU ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork5-cpu-out.jpg");
timer.start();
labwork.labwork5_GPU();
printf("labwork 5 GPU ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork5-gpu-out.jpg");
break;
case 6:
labwork.labwork6_GPU();
labwork.saveOutputImage("labwork6-gpu-out.jpg");
break;
case 7:
labwork.labwork7_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork7-gpu-out.jpg");
break;
case 8:
labwork.labwork8_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork8-gpu-out.jpg");
break;
case 9:
labwork.labwork9_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork9-gpu-out.jpg");
break;
case 10:
labwork.labwork10_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork10-gpu-out.jpg");
break;
}
printf("labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
}
void Labwork::loadInputImage(std::string inputFileName) {
inputImage = jpegLoader.load(inputFileName);
}
void Labwork::saveOutputImage(std::string outputFileName) {
jpegLoader.save(outputFileName, outputImage, inputImage->width, inputImage->height, 90);
}
void Labwork::labwork1_CPU() {
int pixelCount = inputImage->width * inputImage->height;
outputImage = static_cast<char *>(malloc(pixelCount * 3));
for (int j = 0; j < 100; j++) { // let's do it 100 times, otherwise it's too fast!
for (int i = 0; i < pixelCount; i++) {
outputImage[i * 3] = (char) (((int) inputImage->buffer[i * 3] + (int) inputImage->buffer[i * 3 + 1] +
(int) inputImage->buffer[i * 3 + 2]) / 3);
outputImage[i * 3 + 1] = outputImage[i * 3];
outputImage[i * 3 + 2] = outputImage[i * 3];
}
}
}
void Labwork::labwork1_OpenMP() {
int pixelCount = inputImage->width * inputImage->height;
outputImage = static_cast<char *>(malloc(pixelCount * 3));
#pragma omp parallel for schedule(dynamic)
for (int j = 0; j < 100; j++) { // let's do it 100 times, otherwise it's too fast!
for (int i = 0; i < pixelCount; i++) {
outputImage[i * 3] = (char) (((int) inputImage->buffer[i * 3] + (int) inputImage->buffer[i * 3 + 1] +
(int) inputImage->buffer[i * 3 + 2]) / 3);
outputImage[i * 3 + 1] = outputImage[i * 3];
outputImage[i * 3 + 2] = outputImage[i * 3];
}
}
}
int getSPcores(cudaDeviceProp devProp) {
int cores = 0;
int mp = devProp.multiProcessorCount;
switch (devProp.major) {
case 2: // Fermi
if (devProp.minor == 1) cores = mp * 48;
else cores = mp * 32;
break;
case 3: // Kepler
cores = mp * 192;
break;
case 5: // Maxwell
cores = mp * 128;
break;
case 6: // Pascal
if (devProp.minor == 1) cores = mp * 128;
else if (devProp.minor == 0) cores = mp * 64;
else printf("Unknown device type\n");
break;
default:
printf("Unknown device type\n");
break;
}
return cores;
}
void Labwork::labwork2_GPU() {
int nDevices = 0;
// get all devices
cudaGetDeviceCount(&nDevices);
printf("Number total of GPU : %d\n\n", nDevices);
for (int i = 0; i < nDevices; i++){
// get informations from individual device
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
// something more here
printf("GPU #%d\n", i);
printf("GPU name: %s\n", prop.name);
printf("Clock rate: %d\n", prop.clockRate);
printf("Number of cores: %d\n", getSPcores(prop));
printf("Number of multiprocessors: %d\n", prop.multiProcessorCount);
printf("Warp Size: %d\n", prop.warpSize);
printf("Memory Clock Rate: %d\n", prop.memoryClockRate);
printf("Memory Bus Width (bits): %d\nDevices", prop.memoryBusWidth);
printf("Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
// Write a grayscale kernel
__global__ void grayscale(uchar3 *input, uchar3 *output) {
// this will execute in a device core
int tid = threadIdx.x + blockIdx.x * blockDim.x;
output[tid].x = (input[tid].x + input[tid].y +input[tid].z) / 3;
output[tid].z = output[tid].y = output[tid].x;
}
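// Note: this 1D kernel has no bounds check and numBlock = pixelCount / blockSize truncates, so any
// pixels past the last full block are left unprocessed unless pixelCount is a multiple of the block
// size chosen in labwork3_GPU (1024 here); the 2D variant grayscaleVer2D below tests the boundary explicitly.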
void Labwork::labwork3_GPU() {
// Calculate number of pixels
int pixelCount = inputImage->width * inputImage->height;
// Allocate CUDA memory
uchar3 *devInput;
uchar3 *devOutput;
cudaMalloc(&devInput, pixelCount *sizeof(uchar3));
cudaMalloc(&devOutput, pixelCount *sizeof(uchar3));
// Copy InputImage from CPU (host) to GPU (device)
cudaMemcpy(devInput, inputImage->buffer, pixelCount * sizeof(uchar3),cudaMemcpyHostToDevice);
// Processing : launch the kernel
int blockSize = 1024;
int numBlock = pixelCount / blockSize;
grayscale<<<numBlock, blockSize>>>(devInput, devOutput);
// Copy CUDA Memory from GPU to CPU
// allocate memory for the output on the host
outputImage = static_cast<char *>(malloc(pixelCount * sizeof(uchar3)));
cudaMemcpy(outputImage, devOutput, pixelCount * sizeof(uchar3),cudaMemcpyDeviceToHost);
// Cleaning
cudaFree(devInput);
cudaFree(devOutput);
}
__global__ void grayscaleVer2D(uchar3* input, uchar3* output, int imageWidth, int imageHeight){
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
if(tidx >= imageWidth || tidy >= imageHeight) return;
int tid = tidx + tidy * imageWidth;
output[tid].x = (input[tid].x + input[tid].y + input[tid].z) / 3;
output[tid].z = output[tid].y = output[tid].x;
}
void Labwork::labwork4_GPU(){
// Calculate number of pixels
int pixelCount = inputImage->width * inputImage->height;
// // Allocate CUDA memory
uchar3 *devInput;
uchar3 *devOutput;
cudaMalloc(&devInput, pixelCount *sizeof(uchar3));
cudaMalloc(&devOutput, pixelCount *sizeof(uchar3));
// // Copy InputImage from CPU (host) to GPU (device)
cudaMemcpy(devInput, inputImage->buffer, pixelCount * sizeof(uchar3),cudaMemcpyHostToDevice);
// // Processing : launch the kernel
// // int blockSize = 1024;
// // int numBlock = pixelCount / blockSize;
// // grayscale<<<numBlock, blockSize>>>(devInput, devOutput);
dim3 blockSize = dim3(32, 32);
// //dim3 gridSize = dim3(8, 8);
dim3 gridSize = dim3((inputImage->width + blockSize.x -1) / blockSize.x, (inputImage->height + blockSize.y -1) / blockSize.y);
grayscaleVer2D<<<gridSize, blockSize>>>(devInput, devOutput, inputImage->width, inputImage->height);
// // Copy CUDA Memory from GPU to CPU
// // allocate memory for the output on the host
outputImage = static_cast<char *>(malloc(pixelCount * sizeof(uchar3)));
cudaMemcpy(outputImage, devOutput, pixelCount * sizeof(uchar3),cudaMemcpyDeviceToHost);
// // Cleaning
cudaFree(devInput);
cudaFree(devOutput);
}
void Labwork::labwork5_CPU() {
int kernel[] = {0, 0, 1, 2, 1, 0, 0,
0, 3, 13, 22, 13, 3, 0,
1, 13, 59, 97, 59, 13, 1,
2, 22, 97, 159, 97, 22, 2,
1, 13, 59, 97, 59, 13, 1,
0, 3, 13, 22, 13, 3, 0,
0, 0, 1, 2, 1, 0, 0};
// Calculate number of pixels
int pixelCount = inputImage->width * inputImage->height;
outputImage = static_cast<char *>(malloc(pixelCount * sizeof(uchar3)));
for(int rows = 0; rows < inputImage->height; rows++) {
for (int columns = 0; columns < inputImage->width; columns++){
int sum = 0; // sum is for normalization
int constant = 0;
for(int y=-3; y <= 3; y++){
for(int x=-3; x <= 3; x++){
int tempx = columns + x;
int tempy = rows + y;
if( tempx < 0 || tempx >= inputImage->width || tempy < 0 || tempy >= inputImage->height) continue;
int tid = tempx + tempy*inputImage->width;
char pixelValue = (char) (((int) inputImage->buffer[tid * 3] + (int) inputImage->buffer[tid * 3 + 1] +
(int) inputImage->buffer[tid * 3 + 2]) / 3);
int coefficient = kernel[(y+3)*7+x+3];
sum += pixelValue*coefficient;
constant += coefficient;
}
}
sum /= constant;
int positionOut = rows*inputImage->width + columns;
if(positionOut < pixelCount){
outputImage[positionOut * 3] = outputImage[positionOut * 3 + 1] = outputImage[positionOut * 3 + 2] = sum;
}
}
}
}
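// The 7x7 kernel above approximates a Gaussian; the running sum of coefficients (constant) keeps the
// weights normalised at the image borders where part of the window falls outside the image. The GPU
// version below stages the same 49 coefficients in shared memory so each block reads them from on-chip storage.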
// write a blur kernel for shared memory
__global__ void blur(uchar3* input, uchar3* output, int* kernel, int imageWidth, int imageHeight){
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
if(tidx >= imageWidth || tidy >= imageHeight) return;
int tid = tidx + tidy * imageWidth;
__shared__ int sKernel[49];
int localtid = threadIdx.x + threadIdx.y * blockDim.x;
if (localtid < 49){
sKernel[localtid] = kernel[localtid];
}
__syncthreads();
int sum = 0; // sum is for normalization
int constant = 0;
    for(int y=-3; y <= 3; y++){
        for(int x=-3; x <= 3; x++){
int rows = tidx + x;
int columns = tidy + y;
if( rows < 0 || rows >= imageWidth || columns < 0 || columns >= imageHeight) continue;
int tid = rows + columns * imageWidth;
unsigned char pixelValue = (input[tid].x + input[tid].y +input[tid].z) / 3;
int coefficient = sKernel[(y+3)*7+x+3];
sum += pixelValue*coefficient;
constant += coefficient;
}
}
sum /= constant;
// int positionOut = y*inputImage->width + x;
// if(positionOut < pixelCount){
// outputImage[positionOut * 3] = outputImage[positionOut * 3 + 1] = outputImage[positionOut * 3 + 2] = sum;
// }
output[tid].z = output[tid].y = output[tid].x = sum;
}
void Labwork::labwork5_GPU() {
int kernel[] = {0,0,1,2,1,0,0,
0,3,13,22,13,3,0,
1,13,59,97,59,13,1,
2,22,97,159,97,22,2,
1,13,59,97,59,13,1,
0,3,13,22,13,3,0,
0,0,1,2,1,0,0};
int *share;
// Calculate number of pixels
int pixelCount = inputImage->width * inputImage->height;
dim3 blockSize = dim3(32, 32);
//dim3 gridSize = dim3(8, 8);
dim3 gridSize = dim3((inputImage->width + blockSize.x -1) / blockSize.x, (inputImage->height + blockSize.y -1) / blockSize.y);
// Allocate CUDA memory
uchar3 *devInput;
uchar3 *devOutput;
cudaMalloc(&devInput, pixelCount *sizeof(uchar3));
cudaMalloc(&devOutput, pixelCount *sizeof(uchar3));
cudaMalloc(&share, sizeof(kernel));
// allocate memory for the output on the host
outputImage = static_cast<char *>(malloc(pixelCount * sizeof(uchar3)));
// Copy InputImage from CPU (host) to GPU (device)
cudaMemcpy(devInput, inputImage->buffer, pixelCount * sizeof(uchar3),cudaMemcpyHostToDevice);
// Copy Kernel into shared memory
cudaMemcpy(share, kernel, sizeof(kernel), cudaMemcpyHostToDevice);
// Processing : launch the kernel
// int blockSize = 1024;
// int numBlock = pixelCount / blockSize;
blur<<<gridSize, blockSize>>>(devInput, devOutput, share, inputImage->width, inputImage->height);
// Copy CUDA Memory from GPU to CPU
cudaMemcpy(outputImage, devOutput, pixelCount * sizeof(uchar3),cudaMemcpyDeviceToHost);
// // Cleaning
cudaFree(devInput);
cudaFree(devOutput);
cudaFree(share);
}
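// The next three kernels implement the labwork 6 variants: binarization thresholds the gray value,
// brightness adds a constant offset, and blending mixes two images as out = w*a + (1-w)*b.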
__global__ void binarization(uchar3* input, uchar3* output, int imageWidth, int imageHeight, int thresholdValue){
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
if(tidx >= imageWidth || tidy >= imageHeight) return;
int tid = tidx + tidy * imageWidth;
unsigned char binary = (input[tid].x + input[tid].y + input[tid].z) / 3;
if (binary > thresholdValue){
binary = 255;
} else {
binary = 0;
}
output[tid].z = output[tid].y = output[tid].x = binary;
}
__global__ void brightness(uchar3* input, uchar3* output, int imageWidth, int imageHeight, int brightnessValue){
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
if(tidx >= imageWidth || tidy >= imageHeight) return;
int tid = tidx + tidy * imageWidth;
unsigned char binary = (input[tid].x + input[tid].y + input[tid].z) / 3;
// unsigned char increase = binary + brightnessValue;
// if (increase > 255){
// increase = 255;
// } else {
// increase = 0;
// }
binary += brightnessValue;
output[tid].z = output[tid].y = output[tid].x = binary;
}
__global__ void blending(uchar3* input0, uchar3* input1, uchar3* output, int imageWidth, int imageHeight, float weightValue){
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
if(tidx >= imageWidth || tidy >= imageHeight) return;
int tid = tidx + tidy * imageWidth;
// unsigned char binary = (input0[tid].x + input0[tid].y + input0[tid].z) / 3;
// unsigned char binary2 = (input1[tid].x + input1[tid].y + input1[tid].z) / 3;
// binary = weightValue*binary + (1-weightValue)*binary2;
float binary = (input0[tid].x + input0[tid].y + input0[tid].z) / 3;
float binary1 = (input1[tid].x + input1[tid].y + input1[tid].z) / 3;
float totalbinary = (binary * weightValue) + binary1 * (1 - weightValue);
output[tid].z = output[tid].y = output[tid].x = totalbinary;
}
void Labwork::labwork6_GPU() {
/*6A - BINARIZATION
int threshold;
printf("Enter the threshold value: ");
scanf("%d", &threshold);
*/
/* 6B - BRIGHTNESS CONTROLL
int bright;
printf("Enter the threshold value: ");
scanf("%d", &bright);
*/
/* 6C - BLENDING
char buffer[3];
printf("Enter the weight: ", buffer);
scanf("%s", buffer);
int weightValue = atoi(buffer);
*/
// Calculate number of pixels
int pixelCount = inputImage->width * inputImage->height;
// // Allocate CUDA memory
uchar3 *devInput;
uchar3 *devOutput;
cudaMalloc(&devInput, pixelCount *sizeof(uchar3));
cudaMalloc(&devOutput, pixelCount *sizeof(uchar3));
// // Copy InputImage from CPU (host) to GPU (device)
cudaMemcpy(devInput, inputImage->buffer, pixelCount * sizeof(uchar3),cudaMemcpyHostToDevice);
// // Processing : launch the kernel
// // int blockSize = 1024;
// // int numBlock = pixelCount / blockSize;
// // grayscale<<<numBlock, blockSize>>>(devInput, devOutput);
dim3 blockSize = dim3(32, 32);
// //dim3 gridSize = dim3(8, 8);
dim3 gridSize = dim3((inputImage->width + blockSize.x -1) / blockSize.x, (inputImage->height + blockSize.y -1) / blockSize.y);
// 6A - BINARIZATION
// binarization<<<gridSize, blockSize>>>(devInput, devOutput, inputImage->width, inputImage->height, threshold);
// 6B - BRIGHTNESS CONTROLL
//brightness<<<gridSize, blockSize>>>(devInput, devOutput, inputImage->width, inputImage->height, bright);
// 6C - BLENDING
//blending<<<gridSize, blockSize>>>(devInput, devInput, devOutput, inputImage->width, inputImage->height, weightValue);
// // Copy CUDA Memory from GPU to CPU
// // allocate memory for the output on the host
outputImage = static_cast<char *>(malloc(pixelCount * sizeof(uchar3)));
cudaMemcpy(outputImage, devOutput, pixelCount * sizeof(uchar3),cudaMemcpyDeviceToHost);
// // Cleaning
cudaFree(devInput);
cudaFree(devOutput);
}
// __global__ void reduce(uchar3* input, uchar3* output, int imageWidth, int imageHeight){
// // dynamic shared memory size, allocated in host
//     extern __shared__ int cache[];
// // cache the block content
// unsigned int localtid = threadIdx.x;
// unsigned int tidx = threadIdx.x + blockIdx.x * blockDim.x;
// unsigned int tidy = threadIdx.y + blockIdx.y * blockDim.y;
// // if(tidx >= imageWidth || tidy >= imageHeight) return;
// int tid = tidx + tidy * imageWidth;
// cache[localtid] = input[tid].x;
// __syncthreads();
// // reduction in cache
//     for(int s = 1; s < blockDim.x; s *= 2) {
//         if(localtid % (s * 2) == 0 && localtid + s < blockDim.x) {
//             cache[localtid] += cache[localtid + s];
//         }
//         __syncthreads();
//     }
//     // only the first thread of each block writes its partial sum back
//     if(localtid == 0) output[blockIdx.x].x = cache[0];
// }
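// If this reduction were re-enabled, cache would need the dynamic shared-memory size passed as the
// third launch parameter. A minimal sketch, assuming a hypothetical per-block output buffer devPartial:
// reduce<<<numBlock, blockSize, blockSize * sizeof(int)>>>(devInput, devPartial,
//                                                          inputImage->width, inputImage->height);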
void Labwork::labwork7_GPU() {
}
struct hsv {
float *h, *s, *v;
};
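// hsv is a structure-of-arrays: one float plane per channel, so the kernels below exchange the
// H, S and V components through three separate device buffers rather than one interleaved array.
// RGB2HSV uses the usual max/min formulation: V = max(r,g,b), S = delta/V (delta = max-min), and
// H selects one of three 60-degree formulas depending on which channel holds the maximum.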
__global__ void RGB2HSV(uchar3* input, hsv output, int imageWidth, int imageHeight){
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
if(tidx >= imageWidth || tidy >= imageHeight) return;
int tid = tidx + tidy * imageWidth;
float r = (float)input[tid].x/255.0;
float g = (float)input[tid].y/255.0;
float b = (float)input[tid].z/255.0;
float Max = max(r, max(g,b));
float Min = min(r, min(g,b));
float delta = Max - Min;
float h = 0;
float s = 0;
float v = 0;
if (Max != 0){
s = delta/Max;
if (Max == r) h = 60 * fmodf(((g-b)/delta),6.0);
if (Max == g) h = 60 * ((b-r)/delta+2);
if (Max == b) h = 60 * ((r-g)/delta+4);
}
if (Max == 0) s = 0;
    if (delta == 0) h = 0;
    if (h < 0) h += 360.0f; // fmodf can return a negative hue; wrap it into [0, 360)
v = Max;
output.h[tid] = h;
output.s[tid] = s;
output.v[tid] = v;
}
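// HSV2RGB inverts the mapping: h is classified into six 60-degree sectors and the intermediate
// values l, m, n (the usual p, q, t terms) rebuild the RGB triple for the matching sector.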
__global__ void HSV2RGB(hsv input, uchar3* output, int imageWidth, int imageHeight){
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
if(tidx >= imageWidth || tidy >= imageHeight) return;
int tid = tidx + tidy * imageWidth;
float h = input.h[tid];
float s = input.s[tid];
float v = input.v[tid];
float d = h/60;
float hi = (int)d % 6;
float f = d - hi;
float l = v * (1-s);
float m = v * (1-f*s);
float n = v * (1-(1-f)*s);
float r,g,b;
if (h >= 0 && h < 60){
r = v;
g = n;
b = l;
}
if (h >= 60 && h < 120){
r = m;
g = v;
b = l;
}
if (h >= 120 && h < 180){
r = l;
g = v;
b = n;
}
if (h >= 180 && h < 240){
r = l;
g = m;
b = v;
}
if (h >= 240 && h < 300){
r = n;
g = l;
b = v;
}
if (h >= 300 && h < 360){
r = v;
g = l;
b = m;
}
output[tid].x = r*255;
output[tid].y = g*255;
output[tid].z = b*255;
}
void Labwork::labwork8_GPU() {
// Calculate number of pixels
int pixelCount = inputImage->width * inputImage->height;
// // Allocate CUDA memory
uchar3 *devInput;
uchar3 *devOutput;
hsv devHSV;
cudaMalloc((void**)&devHSV.h, pixelCount *sizeof(float));
cudaMalloc((void**)&devHSV.s, pixelCount *sizeof(float));
cudaMalloc((void**)&devHSV.v, pixelCount *sizeof(float));
cudaMalloc(&devInput, pixelCount *sizeof(uchar3));
cudaMalloc(&devOutput, pixelCount *sizeof(uchar3));
// // Copy InputImage from CPU (host) to GPU (device)
cudaMemcpy(devInput, inputImage->buffer, pixelCount * sizeof(uchar3),cudaMemcpyHostToDevice);
// Processing : launch the kernel
// int blockSize = 1024;
// int numBlock = pixelCount / blockSize;
// grayscale<<<numBlock, blockSize>>>(devInput, devOutput);
dim3 blockSize = dim3(32, 32);
// //dim3 gridSize = dim3(8, 8);
dim3 gridSize = dim3((inputImage->width + blockSize.x -1) / blockSize.x, (inputImage->height + blockSize.y -1) / blockSize.y);
// grayscaleVer2D<<<gridSize, blockSize>>>(devInput, devOutput, inputImage->width, inputImage->height);
RGB2HSV<<<gridSize, blockSize>>>(devInput, devHSV, inputImage->width, inputImage->height);
HSV2RGB<<<gridSize, blockSize>>>(devHSV, devOutput, inputImage->width, inputImage->height);
// Copy CUDA Memory from GPU to CPU
// allocate memory for the output on the host
outputImage = static_cast<char *>(malloc(pixelCount * sizeof(uchar3)));
cudaMemcpy(outputImage, devOutput, pixelCount * sizeof(uchar3),cudaMemcpyDeviceToHost);
// Cleaning
cudaFree(devInput);
cudaFree(devOutput);
cudaFree(devHSV.h);
cudaFree(devHSV.s);
cudaFree(devHSV.v);
}
void Labwork::labwork9_GPU() {
}
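// Kuwahara filter: the window around each pixel is split into four overlapping quadrants
// (top-left, top-right, bottom-left, bottom-right). For every quadrant the kernel accumulates a
// mean colour and a spread measure of the brightest channel, then writes out the mean colour of
// the quadrant with the smallest spread, which smooths regions while preserving edges.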
__global__ void kuwahara(uchar3* input, uchar3* output, int imageWidth, int imageHeight, int windowSize){
unsigned int tidx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int tidy = threadIdx.y + blockIdx.y * blockDim.y;
if(tidx >= imageWidth || tidy >= imageHeight) return;
int tid = tidx + tidy * imageWidth;
double window[4] = {0.0};
double SD[4] = {0.0};
int meanRGB[4][3] = {0};
int pxCount[4] = {0};
int windowPosition;
for(int x = 1 - windowSize; x <= windowSize - 1; x++){
for(int y = 1 - windowSize; y <= windowSize - 1; y++){
int rows = tidx + x;
int columns = tidy + y;
if( rows < 0 || rows >= imageWidth || columns < 0 || columns >= imageHeight) continue;
int positionOut = rows + columns * imageWidth;
int red = input[positionOut].x;
int green = input[positionOut].y;
int blue = input[positionOut].z;
if (x <= 0 && y >= 0){
windowPosition = 0; // top left
}
if (x >= 0 && y >= 0){
windowPosition = 1; //top right
}
if (x <= 0 && y <= 0){
windowPosition = 2; // bottom left
}
if (x >= 0 && y <= 0){
windowPosition = 3; // bottom right
}
meanRGB[windowPosition][0] += red;
meanRGB[windowPosition][1] += green;
meanRGB[windowPosition][2] += blue;
window[windowPosition] += max(red, max(green,blue));
pxCount[windowPosition]++;
SD[windowPosition] += pow((max(red, max(green,blue)) - window[windowPosition]),2.0);
}
}
for (int i = 0; i < 4; i++){
SD[i] = sqrt(SD[i] / (pxCount[i]));
window[i] /= pxCount[i];
for(int j = 0; j < 3; j++){
meanRGB[i][j] /= pxCount[i];
}
}
double minSD = min(SD[0], min( SD[1], min(SD[2], SD[3])));
if (minSD == SD[0]) tidx=0;
else if (minSD == SD[1]) tidx=1;
else if (minSD == SD[2]) tidx=2;
else tidx=3;
output[tid].x = meanRGB[tidx][0];
output[tid].y = meanRGB[tidx][1];
output[tid].z = meanRGB[tidx][2];
}
void Labwork::labwork10_GPU(){
// Calculate number of pixels
int pixelCount = inputImage->width * inputImage->height;
int windowSize = 32;
// Allocate CUDA memory
uchar3 *devInput;
uchar3 *devOutput;
cudaMalloc(&devInput, pixelCount *sizeof(uchar3));
cudaMalloc(&devOutput, pixelCount *sizeof(uchar3));
// Copy InputImage from CPU (host) to GPU (device)
cudaMemcpy(devInput, inputImage->buffer, pixelCount * sizeof(uchar3),cudaMemcpyHostToDevice);
// Processing : launch the kernel
// int blockSize = 1024;
// int numBlock = pixelCount / blockSize;
// grayscale<<<numBlock, blockSize>>>(devInput, devOutput);
dim3 blockSize = dim3(32, 32);
//dim3 gridSize = dim3(8, 8);
dim3 gridSize = dim3((inputImage->width + blockSize.x -1) / blockSize.x, (inputImage->height + blockSize.y -1) / blockSize.y);
kuwahara<<<gridSize, blockSize>>>(devInput, devOutput, inputImage->width, inputImage->height, windowSize);
// Copy CUDA Memory from GPU to CPU
// allocate memory for the output on the host
outputImage = static_cast<char *>(malloc(pixelCount * sizeof(uchar3)));
cudaMemcpy(outputImage, devOutput, pixelCount * sizeof(uchar3),cudaMemcpyDeviceToHost);
// Cleaning
cudaFree(devInput);
cudaFree(devOutput);
}
// Sobel edge detector on the grayscale buffer (one byte per pixel); the gradient magnitude is written to every channel of output (input is unused here).
__global__ void sobelGPU(uchar3 *input, uchar3 *output, unsigned char *buffer, int height, int width){
    int tidx = threadIdx.x + blockIdx.x * blockDim.x;
    int tidy = threadIdx.y + blockIdx.y * blockDim.y;
    if (tidx < 1 || tidy < 1 || tidx >= width - 1 || tidy >= height - 1) return;
    int Gx = -buffer[width*(tidy-1)+(tidx-1)] + buffer[width*(tidy-1)+(tidx+1)] - 2*buffer[width*tidy+(tidx-1)]
             + 2*buffer[width*tidy+(tidx+1)] - buffer[width*(tidy+1)+(tidx-1)] + buffer[width*(tidy+1)+(tidx+1)];
    int Gy = -buffer[width*(tidy-1)+(tidx-1)] - 2*buffer[width*(tidy-1)+tidx] - buffer[width*(tidy-1)+(tidx+1)]
             + buffer[width*(tidy+1)+(tidx-1)] + 2*buffer[width*(tidy+1)+tidx] + buffer[width*(tidy+1)+(tidx+1)];
    int magnitude = min(255, (int) sqrtf((float)(Gx * Gx + Gy * Gy)));
    int tid = tidx + tidy * width;
    output[tid].x = output[tid].y = output[tid].z = (unsigned char) magnitude;
}
|
5a3363c2ad0859355f3f0c6b80bb69d0f51253b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
#define GAPX (22)
#define GAPY (2)
#define EXTENT (5)
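// Note: these constants come from the Forma code generator; GAPX/GAPY appear to be the gaps left
// between neighbouring tiles in X and Y, and EXTENT the shared-memory index offset used by the
// halo (boundary-exchange) kernels further down.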
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
/* X, Y, Z */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z+4)*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X));
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z+4)*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X));
float * __tilevar_2__ = __tilevar_0__;
float * __tilevar_3__ = __tilevar_1__;
float * __tilevar_4__ = __tilevar_0__;
float * __tilevar_5__ = __tilevar_1__;
int rowz = FORMA_BLOCKDIM_Z+4;
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY);
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__ += FORMA_BLOCKDIM_Z) {
int __iter_3__ = FORMA_MAX(__iter_2__,0) + (int)(threadIdx.z) ;
if(__iter_3__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-1),(L-1))) {
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
for(; __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)); __iter_4__+=(int)(blockDim.y)) {
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_3__,rowz))] = input[__iter_5__+N*(__iter_4__+M*(__iter_3__))];
}
}
}
__syncthreads();
int __iter_6__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_6__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_7__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
for(; __iter_7__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)); __iter_7__+=(int)(blockDim.y)) {
int __iter_8__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_8__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
float __temp_a3__ = (__tilevar_2__[__iter_8__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a7__ = (__tilevar_2__[__iter_8__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
float __temp_a12__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__+1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
float __temp_a17__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__-1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
float __temp_a22__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__+1,rowz))]);
float __temp_a23__ = (__temp_a18__ + 0.165f * __temp_a22__);
float __temp_a27__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__-1,rowz))]);
float __temp_a28__ = (__temp_a23__ + 0.166f * __temp_a27__);
float __temp_a32__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
__tilevar_3__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))] = __temp_a33__;
}
}
}
int __iter_9__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_9__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_10__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
for(; __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) ; __iter_10__ += (int)(blockDim.y)){
int __iter_11__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
if (__iter_10__ < (FORMA_MAX((__iter_1__+1),1)+2) || __iter_10__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))-2) || __iter_11__ < (FORMA_MAX((__iter_0__+1),1)+2) || __iter_11__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))-2)) {
__copy_arr_0__[__iter_11__+N*(__iter_10__+M*(__iter_9__))] = __tilevar_3__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_10__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_9__,rowz))];
}
}
}
}
__syncthreads ();
int __iter_15__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_15__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_16__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
for(; __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)); __iter_16__+=(int)(blockDim.y)) {
int __iter_17__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
float __temp_a50__ = (__tilevar_3__[__iter_17__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a54__ = (__tilevar_3__[__iter_17__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
float __temp_a59__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__+1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
float __temp_a64__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__-1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
float __temp_a69__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__+1,rowz))]);
float __temp_a70__ = (__temp_a65__ + 0.165f * __temp_a69__);
float __temp_a74__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__-1,rowz))]);
float __temp_a75__ = (__temp_a70__ + 0.166f * __temp_a74__);
float __temp_a79__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
__tilevar_4__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))] = __temp_a80__;
}
}
}
int __iter_18__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_18__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_19__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
for(; __iter_19__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) ; __iter_19__ += (int)(blockDim.y) ){
int __iter_20__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_20__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
if (__iter_19__ < (FORMA_MAX((__iter_1__+2),1)+2) || __iter_19__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))-2) || __iter_20__ < (FORMA_MAX((__iter_0__+2),1)+2) || __iter_20__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))-2)) {
__copy_arr_1__[__iter_20__+N*(__iter_19__+M*(__iter_18__))] = __tilevar_4__[__iter_20__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_19__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_18__,rowz))];
}
}
}
}
__syncthreads ();
int __iter_24__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_24__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_25__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
for(; __iter_25__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)); __iter_25__+=(int)(blockDim.y)) {
int __iter_26__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_26__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
float __temp_a94__ = (__tilevar_4__[__iter_26__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a95__ = (__tilevar_4__[__iter_26__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__);
float __temp_a97__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__+1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__);
float __temp_a99__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__-1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__);
float __temp_a101__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__+1,rowz))]);
float __temp_a102__ = (__temp_a100__ + 0.165f * __temp_a101__);
float __temp_a103__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__-1,rowz))]);
float __temp_a104__ = (__temp_a102__ + 0.166f * __temp_a103__);
float __temp_a105__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__);
__tilevar_5__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))] = __temp_a106__;
}
}
}
int __iter_27__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_27__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_28__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
for(; __iter_28__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)) ; __iter_28__ += (int)(blockDim.y)){
int __iter_29__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_29__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
if (__iter_28__ < (FORMA_MAX((__iter_1__+3),1)+2) || __iter_28__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))-2) || __iter_29__ < (FORMA_MAX((__iter_0__+3),1)+2) || __iter_29__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))-2)) {
__copy_arr_2__[__iter_29__+N*(__iter_28__+M*(__iter_27__))] = __tilevar_5__[__iter_29__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_28__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_27__,rowz))];
}
}
}
}
__syncthreads ();
int __iter_33__ = FORMA_MAX((__iter_2__-4),1) + (int)(threadIdx.z) ;
if(__iter_33__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-5),(L-2))) {
int __iter_34__ = FORMA_MAX((__iter_1__+4),1) + (int)(threadIdx.y) ;
for (; __iter_34__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-2)); __iter_34__+=(int)(blockDim.y)) {
int __iter_35__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ;
if( __iter_35__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-2)) ){
float __temp_a120__ = (__tilevar_5__[__iter_35__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a121__ = (__tilevar_5__[__iter_35__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__);
float __temp_a123__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__+1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__);
float __temp_a125__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__-1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__);
float __temp_a127__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__+1,rowz))]);
float __temp_a128__ = (__temp_a126__ + 0.165f * __temp_a127__);
float __temp_a129__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__-1,rowz))]);
float __temp_a130__ = (__temp_a128__ + 0.166f * __temp_a129__);
float __temp_a131__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__);
__var_1__[__iter_35__+N*(__iter_34__+M*(__iter_33__))] = __temp_a132__;
}
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z);
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(2*(FORMA_BLOCKDIM_Z+4)*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
return SMemSize;
}
/* X+GAP, Y, Z */
__global__ void __kernel___forma_kernel__1__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z+4)*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X));
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z+4)*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X));
float * __tilevar_2__ = __tilevar_0__;
float * __tilevar_3__ = __tilevar_1__;
float * __tilevar_4__ = __tilevar_0__;
float * __tilevar_5__ = __tilevar_1__;
int rowz = FORMA_BLOCKDIM_Z+4;
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX) + (int)(FORMA_BLOCKDIM_X);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY);
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__ += FORMA_BLOCKDIM_Z) {
int __iter_3__ = FORMA_MAX(__iter_2__,0) + (int)(threadIdx.z) ;
if(__iter_3__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-1),(L-1))) {
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
for(; __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)); __iter_4__+=(int)(blockDim.y)) {
int __iter_5__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1)) ){
__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_3__,rowz))] = input[__iter_5__+N*(__iter_4__+M*(__iter_3__))];
}
}
}
__syncthreads();
int __iter_6__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_6__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_7__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
for(; __iter_7__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)); __iter_7__+=(int)(blockDim.y)) {
int __iter_8__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ;
if( __iter_8__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2)) ){
float __temp_a3__ = (__tilevar_2__[__iter_8__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a7__ = (__tilevar_2__[__iter_8__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
float __temp_a12__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__+1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
float __temp_a17__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__-1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
float __temp_a22__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__+1,rowz))]);
float __temp_a23__ = (__temp_a18__ + 0.165f * __temp_a22__);
float __temp_a27__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__-1,rowz))]);
float __temp_a28__ = (__temp_a23__ + 0.166f * __temp_a27__);
float __temp_a32__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod( __iter_6__,rowz))]);
float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
__tilevar_3__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))] = __temp_a33__;
}
}
}
int __iter_9__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_9__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_10__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
for(; __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) ; __iter_10__ += (int)(blockDim.y)){
int __iter_11__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2)) ){
if (__iter_10__ < (FORMA_MAX((__iter_1__+1),1)+2) || __iter_10__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))-2)) {
__copy_arr_0__[__iter_11__+N*(__iter_10__+M*(__iter_9__))] = __tilevar_3__[__iter_11__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_10__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_9__,rowz))];
}
}
}
}
__iter_9__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_9__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_10__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
for(; __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) ; __iter_10__ += (int)(blockDim.y)){
int __iter_11__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
if (__iter_11__ < FORMA_MAX((__iter_0__-1),1) || __iter_11__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2))) {
__tilevar_3__[__iter_11__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_10__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_9__,rowz))] = __copy_arr_0__[__iter_11__+N*(__iter_10__+M*(__iter_9__))];
}
}
}
}
__syncthreads();
int __iter_15__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_15__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_16__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
for (;__iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)); __iter_16__+=(int)(blockDim.y)) {
int __iter_17__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2)) ){
float __temp_a50__ = (__tilevar_3__[__iter_17__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a54__ = (__tilevar_3__[__iter_17__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
float __temp_a59__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__+1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
float __temp_a64__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__-1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
float __temp_a69__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__+1,rowz))]);
float __temp_a70__ = (__temp_a65__ + 0.165f * __temp_a69__);
float __temp_a74__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__-1,rowz))]);
float __temp_a75__ = (__temp_a70__ + 0.166f * __temp_a74__);
float __temp_a79__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
__tilevar_4__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))] = __temp_a80__;
}
}
}
int __iter_18__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_18__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_19__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
for(; __iter_19__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) ; __iter_19__ += (int)(blockDim.y) ){
int __iter_20__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ;
if( __iter_20__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2)) ){
if (__iter_19__ < (FORMA_MAX((__iter_1__+2),1)+2) || __iter_19__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))-2)) {
__copy_arr_1__[__iter_20__+N*(__iter_19__+M*(__iter_18__))] = __tilevar_4__[__iter_20__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_19__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_18__,rowz))];
}
}
}
}
__iter_18__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_18__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_19__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
for(; __iter_19__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) ; __iter_19__ += (int)(blockDim.y) ){
int __iter_20__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_20__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2)) ){
if (__iter_20__ < FORMA_MAX((__iter_0__-2),1) || __iter_20__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2))) {
__tilevar_4__[__iter_20__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_19__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_18__,rowz))] = __copy_arr_1__[__iter_20__+N*(__iter_19__+M*(__iter_18__))];
}
}
}
}
__syncthreads();
int __iter_24__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_24__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_25__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
for(; __iter_25__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)); __iter_25__+=(int)(blockDim.y)) {
int __iter_26__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_26__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
float __temp_a94__ = (__tilevar_4__[__iter_26__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a95__ = (__tilevar_4__[__iter_26__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__);
float __temp_a97__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__+1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__);
float __temp_a99__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__-1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__);
float __temp_a101__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__+1,rowz))]);
float __temp_a102__ = (__temp_a100__ + 0.165f * __temp_a101__);
float __temp_a103__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__-1,rowz))]);
float __temp_a104__ = (__temp_a102__ + 0.166f * __temp_a103__);
float __temp_a105__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__);
__tilevar_5__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))] = __temp_a106__;
}
}
}
int __iter_27__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_27__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_28__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
for(; __iter_28__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)) ; __iter_28__ += (int)(blockDim.y)){
int __iter_29__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_29__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
if (__iter_28__ < (FORMA_MAX((__iter_1__+3),1)+2) || __iter_28__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))-2)) {
__copy_arr_2__[__iter_29__+N*(__iter_28__+M*(__iter_27__))] = __tilevar_5__[__iter_29__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_28__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_27__,rowz))];
}
}
}
}
__iter_27__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_27__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_28__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
for(; __iter_28__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)) ; __iter_28__ += (int)(blockDim.y)){
int __iter_29__ = FORMA_MAX((__iter_0__-5),1) + (int)(threadIdx.x) ;
if( __iter_29__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(N-2)) ){
if (__iter_29__ < FORMA_MAX((__iter_0__-3),1) || __iter_29__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2))) {
__tilevar_5__[__iter_29__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_28__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_27__,rowz))] = __copy_arr_2__[__iter_29__+N*(__iter_28__+M*(__iter_27__))];
}
}
}
}
__syncthreads();
int __iter_33__ = FORMA_MAX((__iter_2__-4),1) + (int)(threadIdx.z) ;
if(__iter_33__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-5),(L-2))) {
int __iter_34__ = FORMA_MAX((__iter_1__+4),1) + (int)(threadIdx.y) ;
for(; __iter_34__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-2)); __iter_34__+=(int)(blockDim.y)) {
int __iter_35__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_35__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2)) ){
float __temp_a120__ = (__tilevar_5__[__iter_35__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a121__ = (__tilevar_5__[__iter_35__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__);
float __temp_a123__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__+1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__);
float __temp_a125__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__-1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__);
float __temp_a127__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__+1,rowz))]);
float __temp_a128__ = (__temp_a126__ + 0.165f * __temp_a127__);
float __temp_a129__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__-1,rowz))]);
float __temp_a130__ = (__temp_a128__ + 0.166f * __temp_a129__);
float __temp_a131__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__);
__var_1__[__iter_35__+N*(__iter_34__+M*(__iter_33__))] = __temp_a132__;
}
}
}
}
}
/* X, Y+GAP, Z */
__global__ void __kernel___forma_kernel__2__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z+4)*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X));
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z+4)*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X));
float * __tilevar_2__ = __tilevar_0__;
float * __tilevar_3__ = __tilevar_1__;
float * __tilevar_4__ = __tilevar_0__;
float * __tilevar_5__ = __tilevar_1__;
int rowz = FORMA_BLOCKDIM_Z+4;
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY) + (int)(FORMA_BLOCKDIM_Y);
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__ += FORMA_BLOCKDIM_Z) {
int __iter_3__ = FORMA_MAX(__iter_2__,0) + (int)(threadIdx.z) ;
if(__iter_3__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-1),(L-1))) {
int __iter_4__ = FORMA_MAX(__iter_1__-2,0) + (int)(threadIdx.y) ;
for(; __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1)); __iter_4__+=(int)(blockDim.y)) {
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_3__,rowz))] = input[__iter_5__+N*(__iter_4__+M*(__iter_3__))];
}
}
}
__syncthreads();
int __iter_6__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_6__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_7__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
for(; __iter_7__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2)); __iter_7__+=(int)(blockDim.y)) {
int __iter_8__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_8__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
float __temp_a3__ = (__tilevar_2__[__iter_8__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a7__ = (__tilevar_2__[__iter_8__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
float __temp_a12__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__+1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
float __temp_a17__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__-1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
float __temp_a22__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__+1,rowz))]);
float __temp_a23__ = (__temp_a18__ + 0.165f * __temp_a22__);
float __temp_a27__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__-1,rowz))]);
float __temp_a28__ = (__temp_a23__ + 0.166f * __temp_a27__);
float __temp_a32__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
__tilevar_3__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))] = __temp_a33__;
}
}
}
int __iter_9__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_9__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_10__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
for(; __iter_10__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2)) ; __iter_10__ += (int)(blockDim.y)){
int __iter_11__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
if (__iter_11__ < (FORMA_MAX((__iter_0__+1),1)+2) || __iter_11__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))-2)) {
__copy_arr_0__[__iter_11__+N*(__iter_10__+M*(__iter_9__))] = __tilevar_3__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_10__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_9__,rowz))];
}
}
}
}
__iter_9__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_9__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_10__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
for(; __iter_10__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2)) ; __iter_10__ += (int)(blockDim.y)){
int __iter_11__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
if (__iter_10__ < FORMA_MAX((__iter_1__-1),1) || __iter_10__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2))) {
__tilevar_3__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_10__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_9__,rowz))] = __copy_arr_0__[__iter_11__+N*(__iter_10__+M*(__iter_9__))];
}
}
}
}
__syncthreads();
int __iter_15__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_15__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_16__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
for(; __iter_16__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2)); __iter_16__+=(int)(blockDim.y)) {
int __iter_17__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
float __temp_a50__ = (__tilevar_3__[__iter_17__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a54__ = (__tilevar_3__[__iter_17__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
float __temp_a59__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__+1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
float __temp_a64__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__-1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
float __temp_a69__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__+1,rowz))]);
float __temp_a70__ = (__temp_a65__ + 0.165f * __temp_a69__);
float __temp_a74__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__-1,rowz))]);
float __temp_a75__ = (__temp_a70__ + 0.166f * __temp_a74__);
float __temp_a79__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
__tilevar_4__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))] = __temp_a80__;
}
}
}
int __iter_18__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_18__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_19__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
for(; __iter_19__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2)) ; __iter_19__ += (int)(blockDim.y) ){
int __iter_20__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_20__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
if (__iter_20__ < (FORMA_MAX((__iter_0__+2),1)+2) || __iter_20__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))-2)) {
__copy_arr_1__[__iter_20__+N*(__iter_19__+M*(__iter_18__))] = __tilevar_4__[__iter_20__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_19__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_18__,rowz))];
}
}
}
}
__iter_18__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_18__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_19__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
for(; __iter_19__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2)) ; __iter_19__ += (int)(blockDim.y) ){
int __iter_20__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_20__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
if (__iter_19__ < FORMA_MAX((__iter_1__-2),1) || __iter_19__ > FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2))) {
__tilevar_4__[__iter_20__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_19__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_18__,rowz))] = __copy_arr_1__[__iter_20__+N*(__iter_19__+M*(__iter_18__))];
}
}
}
}
__syncthreads();
int __iter_24__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_24__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_25__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
for (; __iter_25__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2)); __iter_25__+=(int)(blockDim.y)) {
int __iter_26__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_26__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
float __temp_a94__ = (__tilevar_4__[__iter_26__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a95__ = (__tilevar_4__[__iter_26__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__);
float __temp_a97__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__+1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__);
float __temp_a99__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__-1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__);
float __temp_a101__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__+1,rowz))]);
float __temp_a102__ = (__temp_a100__ + 0.165f * __temp_a101__);
float __temp_a103__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__-1,rowz))]);
float __temp_a104__ = (__temp_a102__ + 0.166f * __temp_a103__);
float __temp_a105__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__);
__tilevar_5__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))] = __temp_a106__;
}
}
}
int __iter_27__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_27__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_28__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
for(; __iter_28__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2)) ; __iter_28__ += (int)(blockDim.y)){
int __iter_29__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_29__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
if (__iter_29__ < (FORMA_MAX((__iter_0__+3),1)+2) || __iter_29__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))-2)) {
__copy_arr_2__[__iter_29__+N*(__iter_28__+M*(__iter_27__))] = __tilevar_5__[__iter_29__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_28__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_27__,rowz))];
}
}
}
}
__iter_27__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_27__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_28__ = FORMA_MAX((__iter_1__-5),1) + (int)(threadIdx.y) ;
for(; __iter_28__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(M-2)) ; __iter_28__ += (int)(blockDim.y)){
int __iter_29__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_29__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
if (__iter_28__ < FORMA_MAX((__iter_1__-3),1) || __iter_28__ > FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) {
__tilevar_5__[__iter_29__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_28__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_27__,rowz))] = __copy_arr_2__[__iter_29__+N*(__iter_28__+M*(__iter_27__))];
}
}
}
}
__syncthreads();
int __iter_33__ = FORMA_MAX((__iter_2__-4),1) + (int)(threadIdx.z) ;
if(__iter_33__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-5),(L-2))) {
int __iter_34__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
for(; __iter_34__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2)); __iter_34__+=(int)(blockDim.y)) {
int __iter_35__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ;
if( __iter_35__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-2)) ){
float __temp_a120__ = (__tilevar_5__[__iter_35__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a121__ = (__tilevar_5__[__iter_35__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__);
float __temp_a123__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__+1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__);
float __temp_a125__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__-1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__);
float __temp_a127__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__+1,rowz))]);
float __temp_a128__ = (__temp_a126__ + 0.165f * __temp_a127__);
float __temp_a129__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__-1,rowz))]);
float __temp_a130__ = (__temp_a128__ + 0.166f * __temp_a129__);
float __temp_a131__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__);
__var_1__[__iter_35__+N*(__iter_34__+M*(__iter_33__))] = __temp_a132__;
}
}
}
}
}
/* X+GAP, Y+GAP, Z */
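/* This kernel covers the corner region where the X and Y gaps overlap: both
   __iter_0__ and __iter_1__ start one full tile (FORMA_BLOCKDIM_X and
   FORMA_BLOCKDIM_Y respectively) past the block origin. */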
__global__ void __kernel___forma_kernel__3__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z+4)*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X));
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z+4)*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X));
float * __tilevar_2__ = __tilevar_0__;
float * __tilevar_3__ = __tilevar_1__;
float * __tilevar_4__ = __tilevar_0__;
float * __tilevar_5__ = __tilevar_1__;
int rowz = FORMA_BLOCKDIM_Z+4;
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX) + (int)(FORMA_BLOCKDIM_X);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY) + (int)(FORMA_BLOCKDIM_Y);
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__ += FORMA_BLOCKDIM_Z) {
int __iter_3__ = FORMA_MAX(__iter_2__,0) + (int)(threadIdx.z) ;
if(__iter_3__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-1),(L-1))) {
int __iter_4__ = FORMA_MAX(__iter_1__-2,0) + (int)(threadIdx.y) ;
for(; __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1)); __iter_4__+=(int)(blockDim.y)) {
int __iter_5__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1)) ){
__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_3__,rowz))] = input[__iter_5__+N*(__iter_4__+M*(__iter_3__))];
}
}
}
__syncthreads();
int __iter_6__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_6__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_7__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
for(; __iter_7__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2)); __iter_7__+=(int)(blockDim.y)) {
int __iter_8__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ;
if( __iter_8__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2)) ){
float __temp_a3__ = (__tilevar_2__[__iter_8__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a7__ = (__tilevar_2__[__iter_8__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
float __temp_a12__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__+1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
float __temp_a17__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__-1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
float __temp_a22__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__+1,rowz))]);
float __temp_a23__ = (__temp_a18__ + 0.165f * __temp_a22__);
float __temp_a27__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__-1,rowz))]);
float __temp_a28__ = (__temp_a23__ + 0.166f * __temp_a27__);
float __temp_a32__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
__tilevar_3__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))] = __temp_a33__;
}
}
}
int __iter_9__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_9__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_10__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
for(; __iter_10__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2)) ; __iter_10__ += (int)(blockDim.y)){
int __iter_11__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
if (__iter_10__ < FORMA_MAX((__iter_1__-1),1) || __iter_10__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2)) || __iter_11__ < FORMA_MAX((__iter_0__-1),1) || __iter_11__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2))) {
__tilevar_3__[__iter_11__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_10__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_9__,rowz))] = __copy_arr_0__[__iter_11__+N*(__iter_10__+M*(__iter_9__))];
}
}
}
}
__syncthreads();
int __iter_15__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_15__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_16__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
for(; __iter_16__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2)); __iter_16__+=(int)(blockDim.y)) {
int __iter_17__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2)) ){
float __temp_a50__ = (__tilevar_3__[__iter_17__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a54__ = (__tilevar_3__[__iter_17__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
float __temp_a59__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__+1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
float __temp_a64__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__-1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
float __temp_a69__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__+1,rowz))]);
float __temp_a70__ = (__temp_a65__ + 0.165f * __temp_a69__);
float __temp_a74__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__-1,rowz))]);
float __temp_a75__ = (__temp_a70__ + 0.166f * __temp_a74__);
float __temp_a79__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
__tilevar_4__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))] = __temp_a80__;
}
}
}
int __iter_18__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_18__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_19__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
for(; __iter_19__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2)) ; __iter_19__ += (int)(blockDim.y) ){
int __iter_20__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_20__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2)) ){
if (__iter_19__ < (FORMA_MAX((__iter_1__-2),1)) || __iter_19__ > (FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2))) || __iter_20__ < FORMA_MAX((__iter_0__-2),1) || __iter_20__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2))) {
__tilevar_4__[__iter_20__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_19__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_18__,rowz))] = __copy_arr_1__[__iter_20__+N*(__iter_19__+M*(__iter_18__))];
}
}
}
}
__syncthreads();
int __iter_24__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_24__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_25__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
for(; __iter_25__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2)); __iter_25__+=(int)(blockDim.y)) {
int __iter_26__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_26__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
float __temp_a94__ = (__tilevar_4__[__iter_26__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a95__ = (__tilevar_4__[__iter_26__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__);
float __temp_a97__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__+1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__);
float __temp_a99__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__-1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__);
float __temp_a101__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__+1,rowz))]);
float __temp_a102__ = (__temp_a100__ + 0.165f * __temp_a101__);
float __temp_a103__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__-1,rowz))]);
float __temp_a104__ = (__temp_a102__ + 0.166f * __temp_a103__);
float __temp_a105__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__);
__tilevar_5__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))] = __temp_a106__;
}
}
}
int __iter_27__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_27__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_28__ = FORMA_MAX((__iter_1__-5),1) + (int)(threadIdx.y) ;
for(; __iter_28__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(M-2)) ; __iter_28__ += (int)(blockDim.y)){
int __iter_29__ = FORMA_MAX((__iter_0__-5),1) + (int)(threadIdx.x) ;
if( __iter_29__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(N-2)) ){
if (__iter_28__ < (FORMA_MAX((__iter_1__-3),1)) || __iter_28__ > (FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) || __iter_29__ < FORMA_MAX((__iter_0__-3),1) || __iter_29__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2))) {
__tilevar_5__[__iter_29__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_28__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_27__,rowz))] = __copy_arr_2__[__iter_29__+N*(__iter_28__+M*(__iter_27__))];
}
}
}
}
__syncthreads();
int __iter_33__ = FORMA_MAX((__iter_2__-4),1) + (int)(threadIdx.z) ;
if(__iter_33__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-5),(L-2))) {
int __iter_34__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
for(; __iter_34__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2)); __iter_34__+=(int)(blockDim.y)) {
int __iter_35__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_35__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2)) ){
float __temp_a120__ = (__tilevar_5__[__iter_35__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a121__ = (__tilevar_5__[__iter_35__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__);
float __temp_a123__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__+1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__);
float __temp_a125__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__-1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__);
float __temp_a127__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__+1,rowz))]);
float __temp_a128__ = (__temp_a126__ + 0.165f * __temp_a127__);
float __temp_a129__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__-1,rowz))]);
float __temp_a130__ = (__temp_a128__ + 0.166f * __temp_a129__);
float __temp_a131__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__);
__var_1__[__iter_35__+N*(__iter_34__+M*(__iter_33__))] = __temp_a132__;
}
}
}
}
}
/*Device code End */
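/* Summary of the device code above: the four kernels __kernel___forma_kernel__0__..3__
   partition each XY plane into (X, Y), (X+GAP, Y), (X, Y+GAP) and (X+GAP, Y+GAP)
   tile regions. Each kernel streams over Z in chunks of FORMA_BLOCKDIM_Z, applies
   the 7-point stencil
       out = 0.161f*E + 0.162f*W + 0.163f*N + 0.164f*S + 0.165f*T + 0.166f*B - 1.670f*C
   four times per chunk through the shared-memory buffers __tilevar_2__..__tilevar_5__,
   and exchanges tile halos between kernels through the global arrays
   __copy_arr_0__, __copy_arr_1__ and __copy_arr_2__; the final stage writes __var_1__. */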
/* Host Code Begin */
extern "C" void j3d7pt(float * h_input, int L, int M, int N, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __copy_arr_0__;
hipMalloc(&__copy_arr_0__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n");
float * __copy_arr_1__;
hipMalloc(&__copy_arr_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n");
float * __copy_arr_2__;
hipMalloc(&__copy_arr_2__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = N;
int __size_1___kernel___forma_kernel__0__ = M;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 12;
int __block_2___kernel___forma_kernel__0__ = 4;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y+GAPY);
int __grid_2___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y/3, __blockConfig___kernel___forma_kernel__0__.z);
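/* Note: the kernels walk Y with a blockDim.y stride, so launching with
   __blockConfig__.y/3 threads in Y (4 for the 12-row tile configured above)
   makes each thread cover three rows, while the full FORMA_BLOCKDIM_Y is still
   passed as a kernel argument so the shared-memory tiles keep their full extent. */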
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__1__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__2__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__2__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__3__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__3__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__copy_arr_0__);
hipFree(__copy_arr_1__);
hipFree(__copy_arr_2__);
}
/*Host Free End*/
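/* A minimal usage sketch for the j3d7pt wrapper above, assuming the caller owns
   host buffers of size L*M*N. The driver name, problem size and initial values
   are illustrative assumptions only; device allocation, transfers and the four
   kernel launches all happen inside j3d7pt. */
#include <stdlib.h>
static void example_j3d7pt_driver(void){
  const int L = 64, M = 64, N = 64;                       /* hypothetical problem size */
  float *h_in  = (float*)malloc(sizeof(float)*(size_t)L*M*N);
  float *h_out = (float*)malloc(sizeof(float)*(size_t)L*M*N);
  for (long i = 0; i < (long)L*M*N; ++i) h_in[i] = 1.0f;  /* arbitrary input field */
  j3d7pt(h_in, L, M, N, h_out);                           /* run the tiled 7-point stencil */
  free(h_in); free(h_out);
}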
| 5a3363c2ad0859355f3f0c6b80bb69d0f51253b6.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
#define GAPX (22)
#define GAPY (2)
#define EXTENT (5)
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
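/* Hypothetical usage: a call such as initialize_array(d_buf, L*M*N, 0.0f)
   (with d_buf any device float buffer) launches __kernel_init__ with
   FORMA_MAX_BLOCKDIM_0 threads per block to fill the buffer with the given value. */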
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
/* X, Y, Z */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z+4)*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X));
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z+4)*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X));
float * __tilevar_2__ = __tilevar_0__;
float * __tilevar_3__ = __tilevar_1__;
float * __tilevar_4__ = __tilevar_0__;
float * __tilevar_5__ = __tilevar_1__;
int rowz = FORMA_BLOCKDIM_Z+4;
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY);
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__ += FORMA_BLOCKDIM_Z) {
int __iter_3__ = FORMA_MAX(__iter_2__,0) + (int)(threadIdx.z) ;
if(__iter_3__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-1),(L-1))) {
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
for(; __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)); __iter_4__+=(int)(blockDim.y)) {
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_3__,rowz))] = input[__iter_5__+N*(__iter_4__+M*(__iter_3__))];
}
}
}
__syncthreads();
int __iter_6__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_6__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_7__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
for(; __iter_7__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)); __iter_7__+=(int)(blockDim.y)) {
int __iter_8__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_8__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
float __temp_a3__ = (__tilevar_2__[__iter_8__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a7__ = (__tilevar_2__[__iter_8__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
float __temp_a12__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__+1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
float __temp_a17__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__-1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
float __temp_a22__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__+1,rowz))]);
float __temp_a23__ = (__temp_a18__ + 0.165f * __temp_a22__);
float __temp_a27__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__-1,rowz))]);
float __temp_a28__ = (__temp_a23__ + 0.166f * __temp_a27__);
float __temp_a32__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
__tilevar_3__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))] = __temp_a33__;
}
}
}
int __iter_9__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_9__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_10__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
for(; __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) ; __iter_10__ += (int)(blockDim.y)){
int __iter_11__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
if (__iter_10__ < (FORMA_MAX((__iter_1__+1),1)+2) || __iter_10__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))-2) || __iter_11__ < (FORMA_MAX((__iter_0__+1),1)+2) || __iter_11__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))-2)) {
__copy_arr_0__[__iter_11__+N*(__iter_10__+M*(__iter_9__))] = __tilevar_3__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_10__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_9__,rowz))];
}
}
}
}
__syncthreads ();
int __iter_15__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_15__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_16__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
for(; __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)); __iter_16__+=(int)(blockDim.y)) {
int __iter_17__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
float __temp_a50__ = (__tilevar_3__[__iter_17__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a54__ = (__tilevar_3__[__iter_17__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
float __temp_a59__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__+1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
float __temp_a64__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__-1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
float __temp_a69__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__+1,rowz))]);
float __temp_a70__ = (__temp_a65__ + 0.165f * __temp_a69__);
float __temp_a74__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__-1,rowz))]);
float __temp_a75__ = (__temp_a70__ + 0.166f * __temp_a74__);
float __temp_a79__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
__tilevar_4__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))] = __temp_a80__;
}
}
}
int __iter_18__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_18__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_19__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
for(; __iter_19__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) ; __iter_19__ += (int)(blockDim.y) ){
int __iter_20__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_20__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
if (__iter_19__ < (FORMA_MAX((__iter_1__+2),1)+2) || __iter_19__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))-2) || __iter_20__ < (FORMA_MAX((__iter_0__+2),1)+2) || __iter_20__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))-2)) {
__copy_arr_1__[__iter_20__+N*(__iter_19__+M*(__iter_18__))] = __tilevar_4__[__iter_20__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_19__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_18__,rowz))];
}
}
}
}
__syncthreads ();
int __iter_24__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_24__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_25__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
for(; __iter_25__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)); __iter_25__+=(int)(blockDim.y)) {
int __iter_26__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_26__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
float __temp_a94__ = (__tilevar_4__[__iter_26__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a95__ = (__tilevar_4__[__iter_26__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__);
float __temp_a97__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__+1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__);
float __temp_a99__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__-1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__);
float __temp_a101__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__+1,rowz))]);
float __temp_a102__ = (__temp_a100__ + 0.165f * __temp_a101__);
float __temp_a103__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__-1,rowz))]);
float __temp_a104__ = (__temp_a102__ + 0.166f * __temp_a103__);
float __temp_a105__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__);
__tilevar_5__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))] = __temp_a106__;
}
}
}
int __iter_27__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_27__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_28__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
for(; __iter_28__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)) ; __iter_28__ += (int)(blockDim.y)){
int __iter_29__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_29__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
if (__iter_28__ < (FORMA_MAX((__iter_1__+3),1)+2) || __iter_28__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))-2) || __iter_29__ < (FORMA_MAX((__iter_0__+3),1)+2) || __iter_29__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))-2)) {
__copy_arr_2__[__iter_29__+N*(__iter_28__+M*(__iter_27__))] = __tilevar_5__[__iter_29__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_28__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_27__,rowz))];
}
}
}
}
__syncthreads ();
int __iter_33__ = FORMA_MAX((__iter_2__-4),1) + (int)(threadIdx.z) ;
if(__iter_33__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-5),(L-2))) {
int __iter_34__ = FORMA_MAX((__iter_1__+4),1) + (int)(threadIdx.y) ;
for (; __iter_34__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-2)); __iter_34__+=(int)(blockDim.y)) {
int __iter_35__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ;
if( __iter_35__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-2)) ){
float __temp_a120__ = (__tilevar_5__[__iter_35__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a121__ = (__tilevar_5__[__iter_35__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__);
float __temp_a123__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__+1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__);
float __temp_a125__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__-1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__);
float __temp_a127__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__+1,rowz))]);
float __temp_a128__ = (__temp_a126__ + 0.165f * __temp_a127__);
float __temp_a129__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__-1,rowz))]);
float __temp_a130__ = (__temp_a128__ + 0.166f * __temp_a129__);
float __temp_a131__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__);
__var_1__[__iter_35__+N*(__iter_34__+M*(__iter_33__))] = __temp_a132__;
}
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z);
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(2*(FORMA_BLOCKDIM_Z+4)*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
return SMemSize;
}
/* X+GAP, Y, Z */
__global__ void __kernel___forma_kernel__1__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z+4)*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X));
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z+4)*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X));
float * __tilevar_2__ = __tilevar_0__;
float * __tilevar_3__ = __tilevar_1__;
float * __tilevar_4__ = __tilevar_0__;
float * __tilevar_5__ = __tilevar_1__;
int rowz = FORMA_BLOCKDIM_Z+4;
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX) + (int)(FORMA_BLOCKDIM_X);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY);
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__ += FORMA_BLOCKDIM_Z) {
int __iter_3__ = FORMA_MAX(__iter_2__,0) + (int)(threadIdx.z) ;
if(__iter_3__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-1),(L-1))) {
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
for(; __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)); __iter_4__+=(int)(blockDim.y)) {
int __iter_5__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1)) ){
__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_3__,rowz))] = input[__iter_5__+N*(__iter_4__+M*(__iter_3__))];
}
}
}
__syncthreads();
int __iter_6__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_6__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_7__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
for(; __iter_7__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)); __iter_7__+=(int)(blockDim.y)) {
int __iter_8__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ;
if( __iter_8__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2)) ){
float __temp_a3__ = (__tilevar_2__[__iter_8__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a7__ = (__tilevar_2__[__iter_8__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
float __temp_a12__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__+1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
float __temp_a17__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__-1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
float __temp_a22__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__+1,rowz))]);
float __temp_a23__ = (__temp_a18__ + 0.165f * __temp_a22__);
float __temp_a27__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__-1,rowz))]);
float __temp_a28__ = (__temp_a23__ + 0.166f * __temp_a27__);
float __temp_a32__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
__tilevar_3__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))] = __temp_a33__;
}
}
}
int __iter_9__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_9__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_10__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
for(; __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) ; __iter_10__ += (int)(blockDim.y)){
int __iter_11__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2)) ){
if (__iter_10__ < (FORMA_MAX((__iter_1__+1),1)+2) || __iter_10__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))-2)) {
__copy_arr_0__[__iter_11__+N*(__iter_10__+M*(__iter_9__))] = __tilevar_3__[__iter_11__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_10__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_9__,rowz))];
}
}
}
}
__iter_9__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_9__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_10__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.y) ;
for(; __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) ; __iter_10__ += (int)(blockDim.y)){
int __iter_11__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
if (__iter_11__ < FORMA_MAX((__iter_0__-1),1) || __iter_11__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2))) {
__tilevar_3__[__iter_11__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_10__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_9__,rowz))] = __copy_arr_0__[__iter_11__+N*(__iter_10__+M*(__iter_9__))];
}
}
}
}
__syncthreads();
int __iter_15__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_15__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_16__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
for (;__iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)); __iter_16__+=(int)(blockDim.y)) {
int __iter_17__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2)) ){
float __temp_a50__ = (__tilevar_3__[__iter_17__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a54__ = (__tilevar_3__[__iter_17__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
float __temp_a59__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__+1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
float __temp_a64__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__-1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
float __temp_a69__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__+1,rowz))]);
float __temp_a70__ = (__temp_a65__ + 0.165f * __temp_a69__);
float __temp_a74__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__-1,rowz))]);
float __temp_a75__ = (__temp_a70__ + 0.166f * __temp_a74__);
float __temp_a79__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
__tilevar_4__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))] = __temp_a80__;
}
}
}
int __iter_18__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_18__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_19__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
for(; __iter_19__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) ; __iter_19__ += (int)(blockDim.y) ){
int __iter_20__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ;
if( __iter_20__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2)) ){
if (__iter_19__ < (FORMA_MAX((__iter_1__+2),1)+2) || __iter_19__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))-2)) {
__copy_arr_1__[__iter_20__+N*(__iter_19__+M*(__iter_18__))] = __tilevar_4__[__iter_20__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_19__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_18__,rowz))];
}
}
}
}
__iter_18__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_18__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_19__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.y) ;
for(; __iter_19__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) ; __iter_19__ += (int)(blockDim.y) ){
int __iter_20__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_20__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2)) ){
if (__iter_20__ < FORMA_MAX((__iter_0__-2),1) || __iter_20__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2))) {
__tilevar_4__[__iter_20__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_19__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_18__,rowz))] = __copy_arr_1__[__iter_20__+N*(__iter_19__+M*(__iter_18__))];
}
}
}
}
__syncthreads();
int __iter_24__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_24__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_25__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
for(; __iter_25__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)); __iter_25__+=(int)(blockDim.y)) {
int __iter_26__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_26__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
float __temp_a94__ = (__tilevar_4__[__iter_26__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a95__ = (__tilevar_4__[__iter_26__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__);
float __temp_a97__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__+1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__);
float __temp_a99__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__-1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__);
float __temp_a101__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__+1,rowz))]);
float __temp_a102__ = (__temp_a100__ + 0.165f * __temp_a101__);
float __temp_a103__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__-1,rowz))]);
float __temp_a104__ = (__temp_a102__ + 0.166f * __temp_a103__);
float __temp_a105__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__);
__tilevar_5__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))] = __temp_a106__;
}
}
}
int __iter_27__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_27__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_28__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
for(; __iter_28__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)) ; __iter_28__ += (int)(blockDim.y)){
int __iter_29__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_29__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
if (__iter_28__ < (FORMA_MAX((__iter_1__+3),1)+2) || __iter_28__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))-2)) {
__copy_arr_2__[__iter_29__+N*(__iter_28__+M*(__iter_27__))] = __tilevar_5__[__iter_29__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_28__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_27__,rowz))];
}
}
}
}
__iter_27__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_27__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_28__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.y) ;
for(; __iter_28__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)) ; __iter_28__ += (int)(blockDim.y)){
int __iter_29__ = FORMA_MAX((__iter_0__-5),1) + (int)(threadIdx.x) ;
if( __iter_29__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(N-2)) ){
if (__iter_29__ < FORMA_MAX((__iter_0__-3),1) || __iter_29__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2))) {
__tilevar_5__[__iter_29__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_28__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_27__,rowz))] = __copy_arr_2__[__iter_29__+N*(__iter_28__+M*(__iter_27__))];
}
}
}
}
__syncthreads();
int __iter_33__ = FORMA_MAX((__iter_2__-4),1) + (int)(threadIdx.z) ;
if(__iter_33__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-5),(L-2))) {
int __iter_34__ = FORMA_MAX((__iter_1__+4),1) + (int)(threadIdx.y) ;
for(; __iter_34__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-2)); __iter_34__+=(int)(blockDim.y)) {
int __iter_35__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_35__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2)) ){
float __temp_a120__ = (__tilevar_5__[__iter_35__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a121__ = (__tilevar_5__[__iter_35__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__);
float __temp_a123__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__+1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__);
float __temp_a125__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__-1-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__);
float __temp_a127__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__+1,rowz))]);
float __temp_a128__ = (__temp_a126__ + 0.165f * __temp_a127__);
float __temp_a129__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__-1,rowz))]);
float __temp_a130__ = (__temp_a128__ + 0.166f * __temp_a129__);
float __temp_a131__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__-__iter_1__+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__);
__var_1__[__iter_35__+N*(__iter_34__+M*(__iter_33__))] = __temp_a132__;
}
}
}
}
}
/* X, Y+GAP, Z */
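/* This kernel covers the tiles shifted into the Y gap between thread blocks. Each of the
   four chained stencil applications writes its tile-edge values into a __copy_arr_* buffer
   and reads back the edge values produced by the other gap kernels, so halo cells that this
   kernel cannot compute locally are still up to date. */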
__global__ void __kernel___forma_kernel__2__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z+4)*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X));
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z+4)*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X));
float * __tilevar_2__ = __tilevar_0__;
float * __tilevar_3__ = __tilevar_1__;
float * __tilevar_4__ = __tilevar_0__;
float * __tilevar_5__ = __tilevar_1__;
int rowz = FORMA_BLOCKDIM_Z+4;
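  // the shared tiles keep rowz = FORMA_BLOCKDIM_Z+4 z-planes as a circular buffer
  // (indexed with mod(z, rowz)) while the block streams along the Z dimension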
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY) + (int)(FORMA_BLOCKDIM_Y);
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__ += FORMA_BLOCKDIM_Z) {
int __iter_3__ = FORMA_MAX(__iter_2__,0) + (int)(threadIdx.z) ;
if(__iter_3__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-1),(L-1))) {
int __iter_4__ = FORMA_MAX(__iter_1__-2,0) + (int)(threadIdx.y) ;
for(; __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1)); __iter_4__+=(int)(blockDim.y)) {
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_3__,rowz))] = input[__iter_5__+N*(__iter_4__+M*(__iter_3__))];
}
}
}
__syncthreads();
int __iter_6__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_6__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_7__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
for(; __iter_7__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2)); __iter_7__+=(int)(blockDim.y)) {
int __iter_8__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_8__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
float __temp_a3__ = (__tilevar_2__[__iter_8__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a7__ = (__tilevar_2__[__iter_8__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
float __temp_a12__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__+1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
float __temp_a17__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__-1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
float __temp_a22__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__+1,rowz))]);
float __temp_a23__ = (__temp_a18__ + 0.165f * __temp_a22__);
float __temp_a27__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__-1,rowz))]);
float __temp_a28__ = (__temp_a23__ + 0.166f * __temp_a27__);
float __temp_a32__ = (__tilevar_2__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
__tilevar_3__[__iter_8__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))] = __temp_a33__;
}
}
}
int __iter_9__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_9__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_10__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
for(; __iter_10__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2)) ; __iter_10__ += (int)(blockDim.y)){
int __iter_11__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
if (__iter_11__ < (FORMA_MAX((__iter_0__+1),1)+2) || __iter_11__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))-2)) {
__copy_arr_0__[__iter_11__+N*(__iter_10__+M*(__iter_9__))] = __tilevar_3__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_10__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_9__,rowz))];
}
}
}
}
__iter_9__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_9__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_10__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
for(; __iter_10__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2)) ; __iter_10__ += (int)(blockDim.y)){
int __iter_11__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
if (__iter_10__ < FORMA_MAX((__iter_1__-1),1) || __iter_10__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2))) {
__tilevar_3__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_10__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_9__,rowz))] = __copy_arr_0__[__iter_11__+N*(__iter_10__+M*(__iter_9__))];
}
}
}
}
__syncthreads();
int __iter_15__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_15__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_16__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
for(; __iter_16__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2)); __iter_16__+=(int)(blockDim.y)) {
int __iter_17__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
float __temp_a50__ = (__tilevar_3__[__iter_17__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a54__ = (__tilevar_3__[__iter_17__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
float __temp_a59__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__+1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
float __temp_a64__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__-1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
float __temp_a69__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__+1,rowz))]);
float __temp_a70__ = (__temp_a65__ + 0.165f * __temp_a69__);
float __temp_a74__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__-1,rowz))]);
float __temp_a75__ = (__temp_a70__ + 0.166f * __temp_a74__);
float __temp_a79__ = (__tilevar_3__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
__tilevar_4__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))] = __temp_a80__;
}
}
}
int __iter_18__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_18__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_19__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
for(; __iter_19__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2)) ; __iter_19__ += (int)(blockDim.y) ){
int __iter_20__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_20__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
if (__iter_20__ < (FORMA_MAX((__iter_0__+2),1)+2) || __iter_20__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))-2)) {
__copy_arr_1__[__iter_20__+N*(__iter_19__+M*(__iter_18__))] = __tilevar_4__[__iter_20__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_19__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_18__,rowz))];
}
}
}
}
__iter_18__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_18__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_19__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
for(; __iter_19__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2)) ; __iter_19__ += (int)(blockDim.y) ){
int __iter_20__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_20__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
if (__iter_19__ < FORMA_MAX((__iter_1__-2),1) || __iter_19__ > FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2))) {
__tilevar_4__[__iter_20__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_19__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_18__,rowz))] = __copy_arr_1__[__iter_20__+N*(__iter_19__+M*(__iter_18__))];
}
}
}
}
__syncthreads();
int __iter_24__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_24__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_25__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
for (; __iter_25__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2)); __iter_25__+=(int)(blockDim.y)) {
int __iter_26__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_26__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
float __temp_a94__ = (__tilevar_4__[__iter_26__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a95__ = (__tilevar_4__[__iter_26__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__);
float __temp_a97__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__+1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__);
float __temp_a99__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__-1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__);
float __temp_a101__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__+1,rowz))]);
float __temp_a102__ = (__temp_a100__ + 0.165f * __temp_a101__);
float __temp_a103__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__-1,rowz))]);
float __temp_a104__ = (__temp_a102__ + 0.166f * __temp_a103__);
float __temp_a105__ = (__tilevar_4__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__);
__tilevar_5__[__iter_26__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))] = __temp_a106__;
}
}
}
int __iter_27__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_27__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_28__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
for(; __iter_28__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2)) ; __iter_28__ += (int)(blockDim.y)){
int __iter_29__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_29__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
if (__iter_29__ < (FORMA_MAX((__iter_0__+3),1)+2) || __iter_29__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))-2)) {
__copy_arr_2__[__iter_29__+N*(__iter_28__+M*(__iter_27__))] = __tilevar_5__[__iter_29__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_28__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_27__,rowz))];
}
}
}
}
__iter_27__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_27__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_28__ = FORMA_MAX((__iter_1__-5),1) + (int)(threadIdx.y) ;
for(; __iter_28__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(M-2)) ; __iter_28__ += (int)(blockDim.y)){
int __iter_29__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_29__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
if (__iter_28__ < FORMA_MAX((__iter_1__-3),1) || __iter_28__ > FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) {
__tilevar_5__[__iter_29__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_28__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_27__,rowz))] = __copy_arr_2__[__iter_29__+N*(__iter_28__+M*(__iter_27__))];
}
}
}
}
__syncthreads();
int __iter_33__ = FORMA_MAX((__iter_2__-4),1) + (int)(threadIdx.z) ;
if(__iter_33__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-5),(L-2))) {
int __iter_34__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
for(; __iter_34__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2)); __iter_34__+=(int)(blockDim.y)) {
int __iter_35__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ;
if( __iter_35__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-2)) ){
float __temp_a120__ = (__tilevar_5__[__iter_35__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a121__ = (__tilevar_5__[__iter_35__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__);
float __temp_a123__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__+1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__);
float __temp_a125__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__-1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__);
float __temp_a127__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__+1,rowz))]);
float __temp_a128__ = (__temp_a126__ + 0.165f * __temp_a127__);
float __temp_a129__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__-1,rowz))]);
float __temp_a130__ = (__temp_a128__ + 0.166f * __temp_a129__);
float __temp_a131__ = (__tilevar_5__[__iter_35__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__);
__var_1__[__iter_35__+N*(__iter_34__+M*(__iter_33__))] = __temp_a132__;
}
}
}
}
}
/* X+GAP, Y+GAP, Z */
__global__ void __kernel___forma_kernel__3__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z+4)*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X));
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Z+4)*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X));
float * __tilevar_2__ = __tilevar_0__;
float * __tilevar_3__ = __tilevar_1__;
float * __tilevar_4__ = __tilevar_0__;
float * __tilevar_5__ = __tilevar_1__;
int rowz = FORMA_BLOCKDIM_Z+4;
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX) + (int)(FORMA_BLOCKDIM_X);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY) + (int)(FORMA_BLOCKDIM_Y);
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__ += FORMA_BLOCKDIM_Z) {
int __iter_3__ = FORMA_MAX(__iter_2__,0) + (int)(threadIdx.z) ;
if(__iter_3__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-1),(L-1))) {
int __iter_4__ = FORMA_MAX(__iter_1__-2,0) + (int)(threadIdx.y) ;
for(; __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1)); __iter_4__+=(int)(blockDim.y)) {
int __iter_5__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1)) ){
__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_3__,rowz))] = input[__iter_5__+N*(__iter_4__+M*(__iter_3__))];
}
}
}
__syncthreads();
int __iter_6__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_6__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_7__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
for(; __iter_7__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2)); __iter_7__+=(int)(blockDim.y)) {
int __iter_8__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ;
if( __iter_8__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2)) ){
float __temp_a3__ = (__tilevar_2__[__iter_8__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a7__ = (__tilevar_2__[__iter_8__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
float __temp_a12__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__+1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
float __temp_a17__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__-1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
float __temp_a22__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__+1,rowz))]);
float __temp_a23__ = (__temp_a18__ + 0.165f * __temp_a22__);
float __temp_a27__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__-1,rowz))]);
float __temp_a28__ = (__temp_a23__ + 0.166f * __temp_a27__);
float __temp_a32__ = (__tilevar_2__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))]);
float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
__tilevar_3__[__iter_8__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_7__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_6__,rowz))] = __temp_a33__;
}
}
}
int __iter_9__ = FORMA_MAX((__iter_2__-1),1) + (int)(threadIdx.z) ;
if(__iter_9__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-2),(L-2))) {
int __iter_10__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
for(; __iter_10__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2)) ; __iter_10__ += (int)(blockDim.y)){
int __iter_11__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
if (__iter_10__ < FORMA_MAX((__iter_1__-1),1) || __iter_10__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2)) || __iter_11__ < FORMA_MAX((__iter_0__-1),1) || __iter_11__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2))) {
__tilevar_3__[__iter_11__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_10__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_9__,rowz))] = __copy_arr_0__[__iter_11__+N*(__iter_10__+M*(__iter_9__))];
}
}
}
}
__syncthreads();
int __iter_15__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_15__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_16__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
for(; __iter_16__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2)); __iter_16__+=(int)(blockDim.y)) {
int __iter_17__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2)) ){
float __temp_a50__ = (__tilevar_3__[__iter_17__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a54__ = (__tilevar_3__[__iter_17__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
float __temp_a59__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__+1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
float __temp_a64__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__-1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
float __temp_a69__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__+1,rowz))]);
float __temp_a70__ = (__temp_a65__ + 0.165f * __temp_a69__);
float __temp_a74__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__-1,rowz))]);
float __temp_a75__ = (__temp_a70__ + 0.166f * __temp_a74__);
float __temp_a79__ = (__tilevar_3__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))]);
float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
__tilevar_4__[__iter_17__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_16__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_15__,rowz))] = __temp_a80__;
}
}
}
int __iter_18__ = FORMA_MAX((__iter_2__-2),1) + (int)(threadIdx.z) ;
if(__iter_18__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-3),(L-2))) {
int __iter_19__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
for(; __iter_19__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2)) ; __iter_19__ += (int)(blockDim.y) ){
int __iter_20__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_20__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2)) ){
if (__iter_19__ < (FORMA_MAX((__iter_1__-2),1)) || __iter_19__ > (FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2))) || __iter_20__ < FORMA_MAX((__iter_0__-2),1) || __iter_20__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2))) {
__tilevar_4__[__iter_20__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_19__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_18__,rowz))] = __copy_arr_1__[__iter_20__+N*(__iter_19__+M*(__iter_18__))];
}
}
}
}
__syncthreads();
int __iter_24__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_24__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_25__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
for(; __iter_25__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2)); __iter_25__+=(int)(blockDim.y)) {
int __iter_26__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_26__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
float __temp_a94__ = (__tilevar_4__[__iter_26__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a95__ = (__tilevar_4__[__iter_26__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__);
float __temp_a97__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__+1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__);
float __temp_a99__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__-1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__);
float __temp_a101__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__+1,rowz))]);
float __temp_a102__ = (__temp_a100__ + 0.165f * __temp_a101__);
float __temp_a103__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__-1,rowz))]);
float __temp_a104__ = (__temp_a102__ + 0.166f * __temp_a103__);
float __temp_a105__ = (__tilevar_4__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))]);
float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__);
__tilevar_5__[__iter_26__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_25__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_24__,rowz))] = __temp_a106__;
}
}
}
int __iter_27__ = FORMA_MAX((__iter_2__-3),1) + (int)(threadIdx.z) ;
if(__iter_27__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-4),(L-2))) {
int __iter_28__ = FORMA_MAX((__iter_1__-5),1) + (int)(threadIdx.y) ;
for(; __iter_28__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(M-2)) ; __iter_28__ += (int)(blockDim.y)){
int __iter_29__ = FORMA_MAX((__iter_0__-5),1) + (int)(threadIdx.x) ;
if( __iter_29__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(N-2)) ){
if (__iter_28__ < (FORMA_MAX((__iter_1__-3),1)) || __iter_28__ > (FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) || __iter_29__ < FORMA_MAX((__iter_0__-3),1) || __iter_29__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2))) {
__tilevar_5__[__iter_29__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_28__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_27__,rowz))] = __copy_arr_2__[__iter_29__+N*(__iter_28__+M*(__iter_27__))];
}
}
}
}
__syncthreads();
int __iter_33__ = FORMA_MAX((__iter_2__-4),1) + (int)(threadIdx.z) ;
if(__iter_33__ <= FORMA_MIN(((__iter_2__+FORMA_BLOCKDIM_Z)-5),(L-2))) {
int __iter_34__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
for(; __iter_34__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2)); __iter_34__+=(int)(blockDim.y)) {
int __iter_35__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_35__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2)) ){
float __temp_a120__ = (__tilevar_5__[__iter_35__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a121__ = (__tilevar_5__[__iter_35__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__);
float __temp_a123__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__+1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__);
float __temp_a125__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__-1+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__);
float __temp_a127__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__+1,rowz))]);
float __temp_a128__ = (__temp_a126__ + 0.165f * __temp_a127__);
float __temp_a129__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__-1,rowz))]);
float __temp_a130__ = (__temp_a128__ + 0.166f * __temp_a129__);
float __temp_a131__ = (__tilevar_5__[__iter_35__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_34__+(EXTENT-__iter_1__)+FORMA_BLOCKDIM_Y*mod(__iter_33__,rowz))]);
float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__);
__var_1__[__iter_35__+N*(__iter_34__+M*(__iter_33__))] = __temp_a132__;
}
}
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void j3d7pt(float * h_input, int L, int M, int N, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input);
}
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __copy_arr_0__;
cudaMalloc(&__copy_arr_0__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n");
float * __copy_arr_1__;
cudaMalloc(&__copy_arr_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n");
float * __copy_arr_2__;
cudaMalloc(&__copy_arr_2__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
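  // The same grid and shared-memory size are reused for all four launches: kernel 0 updates the
  // base tiles and kernels 1-3 sweep the X-gap, Y-gap and corner regions left between them.
  // The launched blockDim.y is a third of FORMA_BLOCKDIM_Y, so each thread covers three tile
  // rows through the row loops inside the kernels.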
int __size_0___kernel___forma_kernel__0__ = N;
int __size_1___kernel___forma_kernel__0__ = M;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 12;
int __block_2___kernel___forma_kernel__0__ = 4;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y+GAPY);
int __grid_2___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y/3, __blockConfig___kernel___forma_kernel__0__.z);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__1__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n");
__kernel___forma_kernel__2__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__2__\n");
__kernel___forma_kernel__3__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__3__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__copy_arr_0__);
cudaFree(__copy_arr_1__);
cudaFree(__copy_arr_2__);
}
/*Host Free End*/
|
987b6b0833c5d72a7b4efe956a31d2102f1da098.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
//#include "common.h"
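// CPU reference: least-significant-digit binary radix sort. For each of the 32 bits the array
// is stably partitioned into a "bit clear" bucket (temp_a) and a "bit set" bucket (temp_b),
// and the two buckets are concatenated back into input. The sign bit is treated like any other
// bit, so negative values end up after the positive ones.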
void radix_sort_cpu_1(int * input, const int array_size)
{
int * temp_a = (int*)malloc(sizeof(int)*array_size);
int * temp_b = (int*)malloc(sizeof(int)*array_size);
int count_a = 0;
int count_b = 0;
int mask = 0;
for (int i = 0; i < 32; i++)
{
count_a = 0;
count_b = 0;
mask = 1 << i;
for (int j = 0; j < array_size; j++)
{
int temp = input[j];
if (temp& mask)
{
temp_b[count_b] = input[j];
count_b++;
}
else
{
temp_a[count_a] = input[j];
count_a++;
}
}
//reorder the input depend on the result of this iteration
for (int k = 0; k < count_a; k++)
{
input[k] = temp_a[k];
}
for (int l = 0; l < count_b; l++)
{
input[count_a + l] = temp_b[l];
}
/*for (int f = 0; f < array_size; f++)
{
if (!(f == (array_size - 1)))
{
printf("%d,", input[f]);
}
else
{
printf("%d \n", input[f]);
}
}*/
}
	free(temp_a);
	free(temp_b);
}
__global__ void radix_sort__gpu_01(int* input, int array_size)
{
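	// TODO: placeholder only - the per-bit split/scatter step has not been implemented yet,
	// so this kernel leaves the input array unchanged.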
for (int i = 0; i < 32; i++)
{
}
}
void run_code_radix_sort()
{
int array_size = 1 << 24;
int byte_array_size = sizeof(int)*array_size;
//memory allocation and initialization of array
int * h_array, *h_ref;
h_array = (int*)malloc(byte_array_size);
h_ref = (int*)malloc(byte_array_size);
for (int i = 0; i < array_size; i++)
{
h_array[i] = i % 10;
}
	// 1<<24 elements cannot be covered by 32 blocks (the per-block thread limit is 1024),
	// so fix the block size and derive the grid size from it
	dim3 block(256);
	dim3 grid((array_size + block.x - 1) / block.x);
int *d_array;
hipMalloc((int**)&d_array, byte_array_size);
hipMemcpy(d_array, h_array, byte_array_size, hipMemcpyHostToDevice);
radix_sort__gpu_01 << <grid, block >> >(d_array, array_size);
hipMemcpy(h_ref, d_array, byte_array_size, hipMemcpyDeviceToHost);
hipFree(d_array);
free(h_ref);
free(h_array);
}
//int main()
//{
// int * int_array;
// int array_size = 20;
// int_array = (int*)malloc(sizeof(int)*array_size);
// for (int i = 0; i < array_size; i++)
// {
// int_array[i] = i % 10;
// }
//
// radix_sort_cpu_1(int_array, array_size);
// print_array_radix_sort(int_array, array_size);
//
// free(int_array);
// system("pause");
// return 0;
//} | 987b6b0833c5d72a7b4efe956a31d2102f1da098.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
//#include "common.h"
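// CPU reference: least-significant-digit binary radix sort. For each of the 32 bits the array
// is stably partitioned into a "bit clear" bucket (temp_a) and a "bit set" bucket (temp_b),
// and the two buckets are concatenated back into input. The sign bit is treated like any other
// bit, so negative values end up after the positive ones.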
void radix_sort_cpu_1(int * input, const int array_size)
{
int * temp_a = (int*)malloc(sizeof(int)*array_size);
int * temp_b = (int*)malloc(sizeof(int)*array_size);
int count_a = 0;
int count_b = 0;
int mask = 0;
for (int i = 0; i < 32; i++)
{
count_a = 0;
count_b = 0;
mask = 1 << i;
for (int j = 0; j < array_size; j++)
{
int temp = input[j];
if (temp& mask)
{
temp_b[count_b] = input[j];
count_b++;
}
else
{
temp_a[count_a] = input[j];
count_a++;
}
}
//reorder the input depend on the result of this iteration
for (int k = 0; k < count_a; k++)
{
input[k] = temp_a[k];
}
for (int l = 0; l < count_b; l++)
{
input[count_a + l] = temp_b[l];
}
/*for (int f = 0; f < array_size; f++)
{
if (!(f == (array_size - 1)))
{
printf("%d,", input[f]);
}
else
{
printf("%d \n", input[f]);
}
}*/
}
	free(temp_a);
	free(temp_b);
}
__global__ void radix_sort__gpu_01(int* input, int array_size)
{
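	// TODO: placeholder only - the per-bit split/scatter step has not been implemented yet,
	// so this kernel leaves the input array unchanged.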
for (int i = 0; i < 32; i++)
{
}
}
void run_code_radix_sort()
{
int array_size = 1 << 24;
int byte_array_size = sizeof(int)*array_size;
//memory allocation and initialization of array
int * h_array, *h_ref;
h_array = (int*)malloc(byte_array_size);
h_ref = (int*)malloc(byte_array_size);
for (int i = 0; i < array_size; i++)
{
h_array[i] = i % 10;
}
	// 1<<24 elements cannot be covered by 32 blocks (the per-block thread limit is 1024),
	// so fix the block size and derive the grid size from it
	dim3 block(256);
	dim3 grid((array_size + block.x - 1) / block.x);
int *d_array;
cudaMalloc((int**)&d_array, byte_array_size);
cudaMemcpy(d_array, h_array, byte_array_size, cudaMemcpyHostToDevice);
radix_sort__gpu_01 << <grid, block >> >(d_array, array_size);
cudaMemcpy(h_ref, d_array, byte_array_size, cudaMemcpyDeviceToHost);
cudaFree(d_array);
free(h_ref);
free(h_array);
}
//int main()
//{
// int * int_array;
// int array_size = 20;
// int_array = (int*)malloc(sizeof(int)*array_size);
// for (int i = 0; i < array_size; i++)
// {
// int_array[i] = i % 10;
// }
//
// radix_sort_cpu_1(int_array, array_size);
// print_array_radix_sort(int_array, array_size);
//
// free(int_array);
// system("pause");
// return 0;
//} |
c4b2a0fb95b367d6575aefd62bc9e39aceef3629.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
namespace filter
{
template void linearRow<ushort3, float3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
| c4b2a0fb95b367d6575aefd62bc9e39aceef3629.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
namespace filter
{
template void linearRow<ushort3, float3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
4901c3da334a5f44bac12e205c061478a6fc1351.hip | // !!! This is a file automatically generated by hipify!!!
#include "cudaLocks.h"
hipError_t cudaLocksInit(const int maxBlocksPerKernel, const int numMutexes,
const int numSemaphores,
const bool pageAlign, /* const region_t locksReg*/
const int NUM_SM)
{
if (maxBlocksPerKernel <= 0) return hipErrorInitializationError;
if (numMutexes <= 0) return hipErrorInitializationError;
if (numSemaphores <= 0) return hipErrorInitializationError;
cudaLockData_t * cpuLockData_temp = (cudaLockData_t *)malloc(sizeof(cudaLockData_t) + 0x1000);
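  // pageAlign over-allocates each buffer by one 4 KB page (0x1000 bytes) and advances the
  // returned pointer to the next page boundary so the data starts page-aligned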
if (pageAlign) {
cpuLockData = (cudaLockData_t *)(((((unsigned long long)cpuLockData_temp) >> 12) << 12) + 0x1000);
} else {
cpuLockData = cpuLockData_temp;
}
// initialize some of the lock data's values
cpuLockData->maxBufferSize = maxBlocksPerKernel;
cpuLockData->arrayStride = (maxBlocksPerKernel + NUM_SM) / 16 * 16;
cpuLockData->mutexCount = numMutexes;
cpuLockData->semaphoreCount = numSemaphores;
// malloc arrays for the lock data structure
unsigned int * barrierBuffers_temp = (unsigned int *)malloc((sizeof(unsigned int) * cpuLockData->arrayStride * 2) + 0x1000);
int * mutexBuffers_temp = (int *)malloc((sizeof(int) * cpuLockData->arrayStride * cpuLockData->mutexCount) + 0x1000);
unsigned int * mutexBufferHeads_temp = (unsigned int *)malloc((sizeof(unsigned int) * cpuLockData->mutexCount) + 0x1000);
unsigned int * mutexBufferTails_temp = (unsigned int *)malloc((sizeof(unsigned int) * cpuLockData->mutexCount) + 0x1000);
unsigned int * semaphoreBuffers_temp = (unsigned int *)malloc((sizeof(unsigned int) * 4 * cpuLockData->semaphoreCount) + 0x1000);
if (pageAlign) {
cpuLockData->barrierBuffers = (unsigned int *)(((((unsigned long long)barrierBuffers_temp) >> 12) << 12) + 0x1000);
cpuLockData->mutexBuffers = (int *)(((((unsigned long long)mutexBuffers_temp) >> 12) << 12) + 0x1000);
cpuLockData->mutexBufferHeads = (unsigned int *)(((((unsigned long long)mutexBufferHeads_temp) >> 12) << 12) + 0x1000);
cpuLockData->mutexBufferTails = (unsigned int *)(((((unsigned long long)mutexBufferTails_temp) >> 12) << 12) + 0x1000);
cpuLockData->semaphoreBuffers = (unsigned int *)(((((unsigned long long)semaphoreBuffers_temp) >> 12) << 12) + 0x1000);
} else {
cpuLockData->barrierBuffers = barrierBuffers_temp;
cpuLockData->mutexBuffers = mutexBuffers_temp;
cpuLockData->mutexBufferHeads = mutexBufferHeads_temp;
cpuLockData->mutexBufferTails = mutexBufferTails_temp;
cpuLockData->semaphoreBuffers = semaphoreBuffers_temp;
}
// initialize all memory
int i = 0;
for (i = 0; i < (cpuLockData->arrayStride * 2); ++i) {
cpuLockData->barrierBuffers[i] = 0;
}
for (i = 0; i < (cpuLockData->arrayStride * cpuLockData->mutexCount); ++i) {
// set the first location for each SM to 1 so that the ring buffer can be
// used by the first TB right away (otherwise livelock because no locations
// ever == 1)
if (i % cpuLockData->arrayStride == 0) { cpuLockData->mutexBuffers[i] = 1; }
// for all other locations initialize to -1 so TBs for these locations
// don't think it's their turn right away
else { cpuLockData->mutexBuffers[i] = -1; }
}
for (i = 0; i < cpuLockData->mutexCount; ++i) {
cpuLockData->mutexBufferHeads[i] = 0;
cpuLockData->mutexBufferTails[i] = 0;
}
for (i = 0; i < (cpuLockData->semaphoreCount * 4); ++i) {
cpuLockData->semaphoreBuffers[i] = 0;
}
return hipSuccess;
}
hipError_t cudaLocksDestroy()
{
if (cpuLockData == NULL) { return hipErrorInitializationError; }
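  // NOTE: when pageAlign was used in cudaLocksInit these pointers were advanced past the
  // original malloc() results, so calling free() on them is undefined behaviour; the
  // barrierBuffers allocation is also never released. A correct teardown would need to keep
  // the original (unaligned) pointers around.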
free(cpuLockData->mutexBuffers);
free(cpuLockData->mutexBufferHeads);
free(cpuLockData->mutexBufferTails);
free(cpuLockData->semaphoreBuffers);
free(cpuLockData);
return hipSuccess;
}
| 4901c3da334a5f44bac12e205c061478a6fc1351.cu | #include "cudaLocks.h"
cudaError_t cudaLocksInit(const int maxBlocksPerKernel, const int numMutexes,
const int numSemaphores,
const bool pageAlign, /* const region_t locksReg*/
const int NUM_SM)
{
if (maxBlocksPerKernel <= 0) return cudaErrorInitializationError;
if (numMutexes <= 0) return cudaErrorInitializationError;
if (numSemaphores <= 0) return cudaErrorInitializationError;
cudaLockData_t * cpuLockData_temp = (cudaLockData_t *)malloc(sizeof(cudaLockData_t) + 0x1000);
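  // pageAlign over-allocates each buffer by one 4 KB page (0x1000 bytes) and advances the
  // returned pointer to the next page boundary so the data starts page-aligned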
if (pageAlign) {
cpuLockData = (cudaLockData_t *)(((((unsigned long long)cpuLockData_temp) >> 12) << 12) + 0x1000);
} else {
cpuLockData = cpuLockData_temp;
}
// initialize some of the lock data's values
cpuLockData->maxBufferSize = maxBlocksPerKernel;
cpuLockData->arrayStride = (maxBlocksPerKernel + NUM_SM) / 16 * 16;
cpuLockData->mutexCount = numMutexes;
cpuLockData->semaphoreCount = numSemaphores;
// malloc arrays for the lock data structure
unsigned int * barrierBuffers_temp = (unsigned int *)malloc((sizeof(unsigned int) * cpuLockData->arrayStride * 2) + 0x1000);
int * mutexBuffers_temp = (int *)malloc((sizeof(int) * cpuLockData->arrayStride * cpuLockData->mutexCount) + 0x1000);
unsigned int * mutexBufferHeads_temp = (unsigned int *)malloc((sizeof(unsigned int) * cpuLockData->mutexCount) + 0x1000);
unsigned int * mutexBufferTails_temp = (unsigned int *)malloc((sizeof(unsigned int) * cpuLockData->mutexCount) + 0x1000);
unsigned int * semaphoreBuffers_temp = (unsigned int *)malloc((sizeof(unsigned int) * 4 * cpuLockData->semaphoreCount) + 0x1000);
if (pageAlign) {
cpuLockData->barrierBuffers = (unsigned int *)(((((unsigned long long)barrierBuffers_temp) >> 12) << 12) + 0x1000);
cpuLockData->mutexBuffers = (int *)(((((unsigned long long)mutexBuffers_temp) >> 12) << 12) + 0x1000);
cpuLockData->mutexBufferHeads = (unsigned int *)(((((unsigned long long)mutexBufferHeads_temp) >> 12) << 12) + 0x1000);
cpuLockData->mutexBufferTails = (unsigned int *)(((((unsigned long long)mutexBufferTails_temp) >> 12) << 12) + 0x1000);
cpuLockData->semaphoreBuffers = (unsigned int *)(((((unsigned long long)semaphoreBuffers_temp) >> 12) << 12) + 0x1000);
} else {
cpuLockData->barrierBuffers = barrierBuffers_temp;
cpuLockData->mutexBuffers = mutexBuffers_temp;
cpuLockData->mutexBufferHeads = mutexBufferHeads_temp;
cpuLockData->mutexBufferTails = mutexBufferTails_temp;
cpuLockData->semaphoreBuffers = semaphoreBuffers_temp;
}
// initialize all memory
int i = 0;
for (i = 0; i < (cpuLockData->arrayStride * 2); ++i) {
cpuLockData->barrierBuffers[i] = 0;
}
for (i = 0; i < (cpuLockData->arrayStride * cpuLockData->mutexCount); ++i) {
// set the first location for each SM to 1 so that the ring buffer can be
// used by the first TB right away (otherwise livelock because no locations
// ever == 1)
if (i % cpuLockData->arrayStride == 0) { cpuLockData->mutexBuffers[i] = 1; }
// for all other locations initialize to -1 so TBs for these locations
// don't think it's their turn right away
else { cpuLockData->mutexBuffers[i] = -1; }
}
for (i = 0; i < cpuLockData->mutexCount; ++i) {
cpuLockData->mutexBufferHeads[i] = 0;
cpuLockData->mutexBufferTails[i] = 0;
}
for (i = 0; i < (cpuLockData->semaphoreCount * 4); ++i) {
cpuLockData->semaphoreBuffers[i] = 0;
}
return cudaSuccess;
}
cudaError_t cudaLocksDestroy()
{
if (cpuLockData == NULL) { return cudaErrorInitializationError; }
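  // NOTE: when pageAlign was used in cudaLocksInit these pointers were advanced past the
  // original malloc() results, so calling free() on them is undefined behaviour; the
  // barrierBuffers allocation is also never released. A correct teardown would need to keep
  // the original (unaligned) pointers around.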
free(cpuLockData->mutexBuffers);
free(cpuLockData->mutexBufferHeads);
free(cpuLockData->mutexBufferTails);
free(cpuLockData->semaphoreBuffers);
free(cpuLockData);
return cudaSuccess;
}
|
2693a22cd61e24dac011cdc5129dc28a0af68149.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
#include<malloc.h>
#include<limits.h>
int *a,*c;
int *ga,*gc;
int sizerowa;
int numOfBlocks;
int blocksize=256;
float et;
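// Block-wise minimum reduction: each block loads up to blockDim.x values into shared memory,
// repeatedly halves the number of active threads while keeping the smaller of each pair, and
// thread 0 finally writes the block's minimum to gc[blockIdx.x]. On the first pass the data
// comes from ga, on later passes from the partial results (secondtime).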
__global__ void VectorMatrix(int* ga,int* gc,int sizerowa,int *secondtime,int count_take_first_second)
{
extern __shared__ int s[];
int i=threadIdx.x;
	// pad the shared-memory slot with INT_MAX so threads without a valid element never win the min
	s[i]=INT_MAX;
__syncthreads();
//copy the values from d_var in s[i]
int index = threadIdx.x+(blockDim.x*blockIdx.x);
	if (index<sizerowa)
	{
		if (count_take_first_second==0)
		{
			s[i]=ga[index];
		}
		else
		{
			s[i]=secondtime[index];
		}
	}
__syncthreads();
//printf ("%d ",s[i]);
//Do the reduction
if (blockDim.x>=256)
{
if(i<128)
{
if(s[i]>s[i+128])
{
s[i]= s[i+128];
}
}
__syncthreads( );
}
if (blockDim.x>=128)
{
if(i<64)
{
if(s[i]>s[i+64])
{
s[i]= s[i+64];
}
}
__syncthreads( );
}
if (blockDim.x>=64)
{
if(i<32)
{
if(s[i]>s[i+32])
{
s[i]= s[i+32];
}
}
__syncthreads( );
}
if (blockDim.x>=32)
{
if(i<16)
{
if(s[i]>s[i+16])
{
s[i]= s[i+16];
}
}
__syncthreads( );
}
if (blockDim.x>=16)
{
if(i<8)
{
if(s[i]>s[i+8])
{
s[i]= s[i+8];
}
}
__syncthreads( );
}
if (blockDim.x>=8)
{
if(i<4)
{
if(s[i]>s[i+4])
{
s[i]= s[i+4];
}
}
__syncthreads( );
}
if (blockDim.x>=4)
{
if(i<2)
{
if(s[i]>s[i+2])
{
s[i]= s[i+2];
}
}
__syncthreads( );
}
if (blockDim.x>=2)
{
if(i<1)
{
if(s[i]>s[i+1])
{
s[i]= s[i+1];
}
}
__syncthreads( );
}
//Thread zero will store minimum of this block in d_Min
if(i==0)
{
*(gc+blockIdx.x)=s[0];
//printf("\nblock minimum value %d\n",s[0]);
}
}
int main()
{
printf("Enter user matrix siz\n");
scanf("%d", &sizerowa );
a=(int*)malloc(sizerowa*sizeof(int));
c=(int*)malloc((sizerowa)*sizeof(int));
int i=0;
int flag = 0 ;
int current_size = sizerowa;
int count_first_second = 0 ;
for(i=0;i<sizerowa;i++)
{
*(a+i)=i+2;
}
//test only for size greater than 1000
//*(a+290)= -5 ;
//*(a+800)=-6;
numOfBlocks=sizerowa/blocksize;
if(sizerowa%blocksize>0) numOfBlocks++;
hipMalloc((void**)&ga,sizerowa*sizeof(int));
hipMalloc((void**)&gc,sizerowa*sizeof(int));
hipMemcpy(ga,a,sizerowa*sizeof(int),hipMemcpyHostToDevice);
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
do{
numOfBlocks=current_size/blocksize;
if( current_size%blocksize>0) numOfBlocks++;
if (current_size<blocksize)
{
flag=1 ;
}
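		// NOTE: after the first pass the reduction runs in place in gc (it is both input and
		// output); once such a pass needs more than one block, one block may overwrite a partial
		// minimum before another block has read it. Ping-ponging between two buffers would
		// remove that hazard for very large inputs.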
		hipLaunchKernelGGL((VectorMatrix), dim3(numOfBlocks), dim3(blocksize), blocksize*sizeof(int), 0, ga, gc, current_size, gc, count_first_second);
		// each pass leaves one partial minimum per block in gc, so the next pass has to reduce
		// numOfBlocks values; halving current_size instead would read uninitialised entries of gc
		// and could return a wrong minimum
		current_size=numOfBlocks;
count_first_second++;
}while(flag!=1);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&et,start,stop);
hipEventDestroy(start);
hipEventDestroy(stop);
hipDeviceSynchronize();
hipMemcpy(c,gc,1*sizeof(int),hipMemcpyDeviceToHost);
printf(" parallel %f\n",et);
printf("%d",*(c+0));
return 0 ;
}
| 2693a22cd61e24dac011cdc5129dc28a0af68149.cu | #include<stdio.h>
#include<cuda.h>
#include<malloc.h>
#include<limits.h>
int *a,*c;
int *ga,*gc;
int sizerowa;
int numOfBlocks;
int blocksize=256;
float et;
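// Block-wise minimum reduction: each block loads up to blockDim.x values into shared memory,
// repeatedly halves the number of active threads while keeping the smaller of each pair, and
// thread 0 finally writes the block's minimum to gc[blockIdx.x]. On the first pass the data
// comes from ga, on later passes from the partial results (secondtime).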
__global__ void VectorMatrix(int* ga,int* gc,int sizerowa,int *secondtime,int count_take_first_second)
{
extern __shared__ int s[];
int i=threadIdx.x;
	// pad the shared-memory slot with INT_MAX so threads without a valid element never win the min
	s[i]=INT_MAX;
__syncthreads();
//copy the values from d_var in s[i]
int index = threadIdx.x+(blockDim.x*blockIdx.x);
	if (index<sizerowa)
	{
		if (count_take_first_second==0)
		{
			s[i]=ga[index];
		}
		else
		{
			s[i]=secondtime[index];
		}
	}
__syncthreads();
//printf ("%d ",s[i]);
//Do the reduction
if (blockDim.x>=256)
{
if(i<128)
{
if(s[i]>s[i+128])
{
s[i]= s[i+128];
}
}
__syncthreads( );
}
if (blockDim.x>=128)
{
if(i<64)
{
if(s[i]>s[i+64])
{
s[i]= s[i+64];
}
}
__syncthreads( );
}
if (blockDim.x>=64)
{
if(i<32)
{
if(s[i]>s[i+32])
{
s[i]= s[i+32];
}
}
__syncthreads( );
}
if (blockDim.x>=32)
{
if(i<16)
{
if(s[i]>s[i+16])
{
s[i]= s[i+16];
}
}
__syncthreads( );
}
if (blockDim.x>=16)
{
if(i<8)
{
if(s[i]>s[i+8])
{
s[i]= s[i+8];
}
}
__syncthreads( );
}
if (blockDim.x>=8)
{
if(i<4)
{
if(s[i]>s[i+4])
{
s[i]= s[i+4];
}
}
__syncthreads( );
}
if (blockDim.x>=4)
{
if(i<2)
{
if(s[i]>s[i+2])
{
s[i]= s[i+2];
}
}
__syncthreads( );
}
if (blockDim.x>=2)
{
if(i<1)
{
if(s[i]>s[i+1])
{
s[i]= s[i+1];
}
}
__syncthreads( );
}
//Thread zero will store minimum of this block in d_Min
if(i==0)
{
*(gc+blockIdx.x)=s[0];
//printf("\nblock minimum value %d\n",s[0]);
}
}
int main()
{
printf("Enter user matrix siz\n");
scanf("%d", &sizerowa );
a=(int*)malloc(sizerowa*sizeof(int));
c=(int*)malloc((sizerowa)*sizeof(int));
int i=0;
int flag = 0 ;
int current_size = sizerowa;
int count_first_second = 0 ;
for(i=0;i<sizerowa;i++)
{
*(a+i)=i+2;
}
//test only for size greater than 1000
//*(a+290)= -5 ;
//*(a+800)=-6;
numOfBlocks=sizerowa/blocksize;
if(sizerowa%blocksize>0) numOfBlocks++;
cudaMalloc((void**)&ga,sizerowa*sizeof(int));
cudaMalloc((void**)&gc,sizerowa*sizeof(int));
cudaMemcpy(ga,a,sizerowa*sizeof(int),cudaMemcpyHostToDevice);
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
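// Multi-pass reduction: each pass writes one partial minimum per block into gc,
// and the next pass reduces those partial minima until a single block remains.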
do{
numOfBlocks=current_size/blocksize;
if( current_size%blocksize>0) numOfBlocks++;
if (current_size<blocksize)
{
flag=1 ;
}
VectorMatrix<<<numOfBlocks,blocksize,blocksize*sizeof(int)>>>(ga,gc,current_size,gc,count_first_second);
// each pass leaves one partial minimum per block in gc
current_size=numOfBlocks;
count_first_second++;
}while(flag!=1);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&et,start,stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaDeviceSynchronize();
cudaMemcpy(c,gc,1*sizeof(int),cudaMemcpyDeviceToHost);
printf(" parallel %f\n",et);
printf("%d",*(c+0));
return 0 ;
}
|
244054663e59127658bc35be980d703fa10a5ae4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#ifdef __cplusplus
extern "C" {
#endif
__global__ void abs64(double* A, int size)
{
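// flatten the 3D grid/block coordinates into a single linear element index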
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int idx = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
if (idx >= size) {
return;
}
A[idx] = fabs(A[idx]);
}
#ifdef __cplusplus
}
#endif | 244054663e59127658bc35be980d703fa10a5ae4.cu | #include <math.h>
#ifdef __cplusplus
extern "C" {
#endif
__global__ void abs64(double* A, int size)
{
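// flatten the 3D grid/block coordinates into a single linear element index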
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int idx = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
if (idx >= size) {
return;
}
A[idx] = fabs(A[idx]);
}
#ifdef __cplusplus
}
#endif |
96dc5e332f940693d9cb4d62e6f2d7587bae0083.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Utilities.h"
#include <functional>
#include "gpu_map.cuh"
#include "par_map.h"
#include "CudaUtilities_hip.cuh"
#include <assert.h>
#include <iostream>
#include "Timer.h"
using namespace std;
void example_gpu_map() {
const int w = 100;
const int h = 80;
const size_t sz = w * h * sizeof(int);
int* src = nullptr;
hipMallocManaged(&src, sz);
int* dest = nullptr;
hipMallocManaged(&dest, sz);
par_fill(src, w, h, 12);
auto f = [] __host__ __device__ (int x) { return x + x; };
gpu_map(src, dest, w, h, f);
// dest should contain f(12) and not 12 anymore
std::cout << "Val " << dest[0] << std::endl;
assert(dest[0] == f(12));
assert(dest[(w - 1)*(h - 1) - 1] == f(12));
hipFree(src);
hipFree(dest);
}
void example_gpu_map_large() {
const int w = 10 * 1024;
const int h = 80 * 1024;
const size_t sz = w * h * sizeof(int);
int* src = nullptr;
hipMallocManaged(&src, sz);
int* dest = nullptr;
hipMallocManaged(&dest, sz);
par_fill(src, w, h, 12);
auto f = [] __host__ __device__(int x) { return x + x; };
gpu_map(src, dest, w, h, f);
// dest should contain 2*12 and not 12 anymore
std::cout << "Val " << dest[0] << std::endl;
assert(dest[0] == f(12));
assert(dest[(w - 1)*(h - 1) - 1] == f(12));
hipFree(src);
hipFree(dest);
}
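// Runs gpu_map on a w x h int buffer with the given block shape and prints how long the call took.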
void example_gpu_map_generic(dim3 block, int w, int h) {
cout << "(" << block.x << "," << block.y << "," << block.z << "): ";
const size_t sz = w * h * sizeof(int);
auto t = new Timer();
int* src = nullptr;
hipMallocManaged(&src, sz);
int* dest = nullptr;
hipMallocManaged(&dest, sz);
par_fill(src, w, h, 12);
auto f = [] __host__ __device__(int x) { return x + x; };
t->start();
gpu_map(block, src, dest, w, h, f);
t->stop();
cout << "duration: " << t->delta() << endl;
// if dest[] is not used, it is not calculated
cerr << "needed on Mac OS X=" << dest[0] << endl;
//assert(dest[0] != 12345);
hipFree(src);
hipFree(dest);
}
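// Compares several block shapes (128x1 down to 1x128) on an 8192x8192 buffer.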
void bench_map_blocks() {
int w = 1024 * 8;
int h = 1024 * 8;
example_gpu_map_generic(dim3(128, 1, 1), w, h);
example_gpu_map_generic(dim3(32, 4, 1), w, h);
example_gpu_map_generic(dim3(16, 8, 1), w, h);
example_gpu_map_generic(dim3(8, 16, 1), w, h);
example_gpu_map_generic(dim3(4, 32, 1), w, h);
example_gpu_map_generic(dim3(1, 128, 1), w, h);
}
| 96dc5e332f940693d9cb4d62e6f2d7587bae0083.cu | #include "cuda_runtime.h"
#include "Utilities.h"
#include <functional>
#include "gpu_map.cuh"
#include "par_map.h"
#include "CudaUtilities.cuh"
#include <assert.h>
#include <iostream>
#include "Timer.h"
using namespace std;
void example_gpu_map() {
const int w = 100;
const int h = 80;
const size_t sz = w * h * sizeof(int);
int* src = nullptr;
cudaMallocManaged(&src, sz);
int* dest = nullptr;
cudaMallocManaged(&dest, sz);
par_fill(src, w, h, 12);
auto f = [] __host__ __device__ (int x) { return x + x; };
gpu_map(src, dest, w, h, f);
// dest should contain f(12) and not 12 anymore
std::cout << "Val " << dest[0] << std::endl;
assert(dest[0] == f(12));
assert(dest[(w - 1)*(h - 1) - 1] == f(12));
cudaFree(src);
cudaFree(dest);
}
void example_gpu_map_large() {
const int w = 10 * 1024;
const int h = 80 * 1024;
const size_t sz = w * h * sizeof(int);
int* src = nullptr;
cudaMallocManaged(&src, sz);
int* dest = nullptr;
cudaMallocManaged(&dest, sz);
par_fill(src, w, h, 12);
auto f = [] __host__ __device__(int x) { return x + x; };
gpu_map(src, dest, w, h, f);
// dest should contain 2*12 and not 12 anymore
std::cout << "Val " << dest[0] << std::endl;
assert(dest[0] == f(12));
assert(dest[(w - 1)*(h - 1) - 1] == f(12));
cudaFree(src);
cudaFree(dest);
}
void example_gpu_map_generic(dim3 block, int w, int h) {
cout << "(" << block.x << "," << block.y << "," << block.z << "): ";
const size_t sz = w * h * sizeof(int);
auto t = new Timer();
int* src = nullptr;
cudaMallocManaged(&src, sz);
int* dest = nullptr;
cudaMallocManaged(&dest, sz);
par_fill(src, w, h, 12);
auto f = [] __host__ __device__(int x) { return x + x; };
t->start();
gpu_map(block, src, dest, w, h, f);
t->stop();
cout << "duration: " << t->delta() << endl;
// if dest[] is not used, it is not calculated
cerr << "needed on Mac OS X=" << dest[0] << endl;
//assert(dest[0] != 12345);
cudaFree(src);
cudaFree(dest);
}
void bench_map_blocks() {
int w = 1024 * 8;
int h = 1024 * 8;
example_gpu_map_generic(dim3(128, 1, 1), w, h);
example_gpu_map_generic(dim3(32, 4, 1), w, h);
example_gpu_map_generic(dim3(16, 8, 1), w, h);
example_gpu_map_generic(dim3(8, 16, 1), w, h);
example_gpu_map_generic(dim3(4, 32, 1), w, h);
example_gpu_map_generic(dim3(1, 128, 1), w, h);
}
|
bb39c179e4218aaa66105dac59435d09f577fabf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// cudamatrix/cu-kernels.cu
// Copyright 2009-2012 Karel Vesely
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
// This file contains the CUDA code of the CUDA kernels, plus the ANSI-C wrappers
#include <cfloat>
#include "cu-kernels-ansi.h"
/***********************************************************************
* Generic __device__ functions
*/
template<typename Real>
__device__
static Real _sum_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (sum)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = 0.0;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = buffer[threadIdx.x + halfPoint];
}
buffer[threadIdx.x] += temp;
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
template<typename Real>
__device__
static Real _max_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (max)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = -1e20;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = buffer[threadIdx.x + halfPoint];
}
if (temp > buffer[threadIdx.x]) buffer[threadIdx.x] = temp;
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
template<typename Real>
__device__
static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (get index of maximum)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = -1e20;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = val[idx[threadIdx.x + halfPoint]];
}
if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint];
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return idx[0];
}
/***********************************************************************
* CUDA kernels
* the functions are templated to have the float/double operations
*/
/*
* CuMatrix
*/
template<typename Real>
__global__
static void _set_const(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = value;
}
template<typename Real>
__global__
static void _binarize(Real* mat, Real thres, MatrixDim d){
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if( i < d.cols && j < d.rows )
{
if(mat[index]>thres) mat[index]=1.0;
else mat[index]=0.0;
}
}
template<typename Real>
__global__
static void _add_const(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = mat[index] + value;
}
template<typename Real>
__global__
static void _power(Real* mat, Real pw, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = pow(mat[index], pw);
}
template<typename Real>
__global__
static void _scale(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = mat[index] * value;
}
template<typename Real>
__global__
static void _apply_floor(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows ) {
if (mat[index] < value) mat[index] = value;
}
}
template<typename Real>
__global__
static void _apply_truncate(Real* mat, Real low, Real high, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows ) {
if (mat[index] < low) mat[index] = low;
if (mat[index] > high) mat[index] = high;
}
}
template<typename Real>
__global__
static void _apply_log(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = log(mat[index]);
}
template<typename Real>
__global__
static void _apply_exp(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = exp(mat[index]);
}
template<typename Real>
__global__
static void _mul_elements(Real* mat, const Real* A, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = mat[index] * A[index];
}
template<typename Real>
__global__
static void _div_elements(Real* mat, const Real* A, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = mat[index] / A[index];
}
template<typename Real>
__global__
static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] *= scale[i];
}
template<typename Real>
__global__
static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] *= scale[j];
}
template<typename Real>
__global__
static void _div_cols_vec(Real* mat, const Real* vec_div, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i >= d.cols ) return;
//invert divider in shared memory
__shared__ Real inv[16];
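// note: the fixed-size buffer assumes blockDim.x <= 16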
if(threadIdx.y==0) {
inv[threadIdx.x] = 1.0/vec_div[i];
}
__syncthreads();
//multiply elements
if ( i < d.cols && j < d.rows )
mat[index] *= inv[threadIdx.x];
}
template<typename Real>
__global__
static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( j >= d.rows ) return;
//invert divider in shared memory
__shared__ Real inv[16];
if(threadIdx.x==0) {
inv[threadIdx.y] = 1.0/vec_div[j];
}
__syncthreads();
//multiply elements
if ( i < d.cols && j < d.rows )
mat[index] *= inv[threadIdx.y];
}
template<typename Real>
__global__
static void _add_mat(Real alpha, const Real* A, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
dst[index] = alpha*A[index] + beta*dst[index];
}
template<typename Real>
__global__
static void _part_add_mat(Real alpha, const Real* A, MatrixDim da, int32_cuda ro, int32_cuda co, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
int32_cuda ai = i - co;
int32_cuda aj = j - ro;
int32_cuda A_index = ai + aj * da.stride;
if ( i < d.cols && j < d.rows && ai >=0 && ai <da.cols && aj >=0 && aj < da.rows)
dst[index] = alpha*A[A_index] + beta*dst[index];
}
template<typename Real>
__global__
static void _log_add_exp_mat(const Real* A, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows ) {
// find max
double fa = A[index];
double fb = dst[index];
double max, sum;
if(fa > fb) {
sum = 1.0 + exp(fb - fa);
max=fa;
}else{
sum = exp(fa-fb) + 1.0;
max=fb;
}
dst[index] = max + log(sum);
}
}
template<typename Real>
__global__
static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
dst[index] = alpha*col[j] + beta*dst[index];
}
template<typename Real>
__global__
static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
dst[index] = alpha*row[i] + beta*dst[index];
}
template<typename Real>
__global__
static void _add_vec_to_partial_rows(Real alpha, int32_cuda offset, const Real* row, int32_cuda dim, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
int32_cuda vec_index = i - offset;
if ( vec_index >= 0 && vec_index < dim && i < d.cols && j < d.rows )
dst[index] = alpha*row[vec_index] + beta*dst[index];
}
template<typename Real>
__global__
static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*dmat.stride;
int32_cuda index2 = i + j*dmask.stride;
if ( i < dmat.cols && j < dmat.rows )
if(mask[index2] == 0) mat[index] = 0;
}
/*
* CuVector
*/
template<typename Real>
__global__
static void _add_row_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y; //col
int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; //row
if(blockIdx.x > 0) return;
if(blockDim.y > 1) return;
__shared__ Real row_data[256];
//copy the input to row_data
row_data[j] = mat[i+j*d.stride];
__syncthreads();
//get the sum
Real sum = _sum_reduce(row_data);
__syncthreads();
//add to previously accumulated sum
if(threadIdx.x == 0)
vec_sum[i] += sum;
}
template<typename Real>
__global__
static void _add_col_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; //row
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; //col
if(blockIdx.x > 0) return;
if(blockDim.y > 1) return;
__shared__ Real row_data[256];
//copy the input to row_data
row_data[i] = mat[i+j*d.stride];
__syncthreads();
//get the sum
Real sum = _sum_reduce(row_data);
__syncthreads();
//add to previously accumulated sum
if(threadIdx.x == 0)
vec_sum[j] += sum;
}
template<typename Real>
__global__
static void _invert_elements(Real* data, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
data[index] = 1.0/data[index];
}
/*
* cu::
*/
template<typename Real>
__global__
static void _relu(Real*y, const Real*x, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows ) {
Real res = (x[index] > 0.0) ? x[index] : 0.0;
y[index] = res;
}
}
template<typename Real>
__global__
static void _diff_relu(Real*eout, const Real*e, const Real*y, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows )
eout[index] = (y[index] > 0.0) * e[index];
}
template<typename Real>
__global__
static void _softrelu(Real*y, const Real*x, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows ) {
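// for x > 4, log(1+exp(x)) is within ~0.02 of x, so return x directly and avoid overflow in exp()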
Real res = (x[index] > 4.0) ? x[index] : log(1.0+exp(x[index]));
y[index] = res;
}
}
template<typename Real>
__global__
static void _diff_softrelu(Real*eout, const Real*e, const Real*x, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows ) {
if(x[index]>4.0){
eout[index] = e[index];
}else{
Real f = exp(x[index]);
eout[index] = e[index] * f / (1+f);
}
}
}
template<typename Real>
__global__
static void _sigmoid(Real*y, const Real*x, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows ) {
Real res = 1.0 / (1.0 + exp(-x[index]));
y[index] = res;
}
}
template<typename Real>
__global__
static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows )
eout[index] = y[index]*(1.0-y[index]) * e[index];
}
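// Row-wise softmax: one thread handles a whole row, subtracting the row maximum before exponentiation for numerical stability.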
template<typename Real>
__global__
static void _softmax(Real*y, const Real*x, MatrixDim d) {
int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x;
if(j >= d.rows) return;
//copy to output and find max...
double max = -1e20;
double sum = 0.0;
for(int32_cuda i=0; i<d.cols; i++) {
if(max < x[i+j*d.stride]) max = x[i+j*d.stride];
y[i+j*d.stride] = x[i+j*d.stride];
}
//subtract max, apply exp, sum up...
for(int32_cuda i=0; i<d.cols; i++) {
y[i+j*d.stride] = exp(y[i+j*d.stride] - max);
sum += y[i+j*d.stride];
}
//normalize by sum...
for(int32_cuda i=0; i<d.cols; i++) {
y[i+j*d.stride] /= sum;
}
}
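// Build output row j by concatenating the input rows j + off[k] (clamped to the valid row range), e.g. for frame splicing.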
template<typename Real>
__global__
static void _expand(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d_out.stride;
if( i < d_out.cols && j < d_out.rows ) {
int32_cuda src_col = i % d_in.cols;
int32_cuda src_row = j + off[i / d_in.cols];
if(src_row < 0) src_row = 0;
if(src_row >= d_in.rows) src_row = d_in.rows-1;
y[index] = x[src_col + src_row*d_in.stride];
}
}
template<typename Real>
__global__
static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d_out.stride;
if( i < d_out.cols && j < d_out.rows ) {
int32_cuda src_col = copy_from[i];
if(src_col >= 0 && src_col < d_in.cols) {
y[index] = x[src_col + j*d_in.stride];
} else {
y[index] = 1.0/0.0;
}
}
}
template<typename Real>
__global__
static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d_out.stride;
if( i < d_out.cols && j < d_out.rows ) {
int32_cuda src_row = copy_from[j];
y[index] = x[i + src_row*d_in.stride];
}
}
template<typename Real>
__global__
static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows ) {
if(wei[index]==0.0) return; //skip L1 if zero weight!
Real l1_signed = l1;
if(wei[index] < 0.0) //flip sign
l1_signed = -l1;
Real before = wei[index];
Real after = wei[index] -lr*grad[index] -l1_signed;//simulate update
if((after > 0.0) ^ (before > 0.0)) { //sign changed?
wei[index] = 0.0;
grad[index] = 0.0;
} else {
wei[index] -= l1_signed;
}
}
}
template<typename Real>
__global__
static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if(blockIdx.x > 0) return;
if(blockDim.y != 1) return;
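// note: only columns [0, blockDim.x) of each row are examined, so the kernel expects a single block in the x dimension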
__shared__ Real value[256];
__shared__ int32_cuda index[256];
//copy to shared memory
value[threadIdx.x] = mat[i+j*d.stride];
index[threadIdx.x] = threadIdx.x;
__syncthreads();
//get the id of the max value
int32_cuda out_max = _max_id_reduce(value,index);
__syncthreads();
//update the stored maximum (and its index) if this one is at least as big
if(threadIdx.x == 0) {
if(vec_val[j] <= mat[out_max+j*d.stride]) {
vec_val[j] = mat[out_max+j*d.stride];
vec_id[j] = voff+out_max;
}
}
}
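// Gradient of cross-entropy w.r.t. the softmax input: subtract 1.0 at each row's target index and record the log-posterior of the target.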
template<typename Real>
__global__
static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if(i>0) return;
if(j<d.rows) {
int32_cuda index = vec_tgt[j] + j*d.stride;
Real value = mat_net_out[index];
if(value < 1e-20) value = 1e-20;
vec_log_post[j] = log(value);
mat_net_out[index] -= 1.0;
}
}
template<typename Real>
__global__
static void _softmax_part(const Real* X, const int32_cuda* vec_ids, Real* Y, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows ) {
Real tmp = X[index] - X[vec_ids[j] + j*d.stride];
Y[index] = exp(tmp);
}
}
/***********************************************************************
* ANSI-C wrappers of CUDA kernels
*/
/*
* "int32"
*/
void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) {
hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
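// Example launch configuration for the element-wise 2D wrappers below (a sketch,
// not taken from the original sources; 16x16 matches the shared-memory buffers
// in _div_cols_vec/_div_rows_vec):
// dim3 Bl(16, 16);
// dim3 Gr((d.cols + Bl.x - 1) / Bl.x, (d.rows + Bl.y - 1) / Bl.y);
// cudaF_set_const(Gr, Bl, data, 0.0f, d);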
/*
* "float"
*/
/*
* CuMatrix
*/
void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_binarize(dim3 Gr, dim3 Bl, float* mat, float thres, MatrixDim d) {
hipLaunchKernelGGL(( _binarize), dim3(Gr),dim3(Bl), 0, 0, mat,thres,d);
}
void cudaF_add_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _add_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_power(dim3 Gr, dim3 Bl, float* mat, float pow, MatrixDim d) {
hipLaunchKernelGGL(( _power), dim3(Gr),dim3(Bl), 0, 0, mat,pow,d);
}
void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_apply_truncate(dim3 Gr, dim3 Bl, float* mat, float low, float high, MatrixDim d) {
hipLaunchKernelGGL(( _apply_truncate), dim3(Gr),dim3(Bl), 0, 0, mat,low,high,d);
}
void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim d) {
hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,d);
}
void cudaF_div_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim d) {
hipLaunchKernelGGL(( _div_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,d);
}
void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) {
hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) {
hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaF_div_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) {
hipLaunchKernelGGL(( _div_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d);
}
void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) {
hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d);
}
void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* A, float beta, float* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,A,beta,dst,d);
}
void cudaF_part_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* A, MatrixDim da, int32_cuda ro, int32_cuda co, float beta, float* dst, MatrixDim d) {
hipLaunchKernelGGL(( _part_add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,A,da,ro,co,beta,dst,d);
}
void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d);
}
void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d);
}
void cudaF_add_vec_to_partial_rows(dim3 Gr, dim3 Bl, float alpha, int32_cuda offset, const float* row, int32_cuda dim, float beta, float* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_partial_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,offset,row,dim,beta,dst,d);
}
// CURRENTLY UNUSED...
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask);
}
void cudaF_log_add_exp_mat(dim3 Gr, dim3 Bl, const float* A, float* dst, MatrixDim d) {
hipLaunchKernelGGL(( _log_add_exp_mat), dim3(Gr),dim3(Bl), 0, 0, A,dst,d);
}
/*
* CuVector
*/
void cudaF_add_row_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
hipLaunchKernelGGL(( _add_row_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaF_add_col_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
hipLaunchKernelGGL(( _add_col_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) {
hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d);
}
/*
* cu::
*/
void cudaF_relu (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d) {
hipLaunchKernelGGL(( _relu), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaF_diff_relu (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d) {
hipLaunchKernelGGL(( _diff_relu), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d);
}
void cudaF_softrelu (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d) {
hipLaunchKernelGGL(( _softrelu), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaF_diff_softrelu (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* x, MatrixDim d) {
hipLaunchKernelGGL(( _diff_softrelu), dim3(Gr),dim3(Bl), 0, 0, eout, e, x, d);
}
void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d) {
hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d) {
hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d);
}
void cudaF_softmax (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d) {
hipLaunchKernelGGL(( _softmax), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaF_softmax_part(dim3 Gr, dim3 Bl, const float* X, const int32_cuda* vec_ids, float* Y, MatrixDim d) {
hipLaunchKernelGGL(( _softmax_part), dim3(Gr),dim3(Bl), 0, 0, X,vec_ids,Y,d);
}
void cudaF_expand(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _expand), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in);
}
void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d) {
hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d);
}
void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d);
}
void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) {
hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d);
}
/*
* "double"
*/
/*
* CuMatrix
*/
void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_binarize(dim3 Gr, dim3 Bl, double* mat, double thres, MatrixDim d) {
hipLaunchKernelGGL(( _binarize), dim3(Gr),dim3(Bl), 0, 0, mat,thres,d);
}
void cudaD_add_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _add_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_power(dim3 Gr, dim3 Bl, double* mat, double pow, MatrixDim d) {
hipLaunchKernelGGL(( _power), dim3(Gr),dim3(Bl), 0, 0, mat,pow,d);
}
void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_apply_truncate(dim3 Gr, dim3 Bl, double* mat, double low, double high, MatrixDim d) {
hipLaunchKernelGGL(( _apply_truncate), dim3(Gr),dim3(Bl), 0, 0, mat,low,high,d);
}
void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim d) {
hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,d);
}
void cudaD_div_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim d) {
hipLaunchKernelGGL(( _div_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,d);
}
void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaD_div_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) {
hipLaunchKernelGGL(( _div_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d);
}
void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) {
hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d);
}
void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* A, double beta, double* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,A,beta,dst,d);
}
void cudaD_prat_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* A, MatrixDim da, int32_cuda ro, int32_cuda co, double beta, double* dst, MatrixDim d){
hipLaunchKernelGGL(( _part_add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,A,da,ro,co,beta,dst,d);
}
void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d);
}
void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d);
}
void cudaD_add_vec_to_partial_rows(dim3 Gr, dim3 Bl, double alpha, int32_cuda offset, const double* row, int32_cuda dim, double beta, double* dst, MatrixDim d){
hipLaunchKernelGGL(( _add_vec_to_partial_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,offset,row,dim,beta,dst,d);
}
// CURRENTLY UNUSED...
void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask);
}
void cudaD_log_add_exp_mat(dim3 Gr, dim3 Bl, const double* A, double* dst, MatrixDim d) {
hipLaunchKernelGGL(( _log_add_exp_mat), dim3(Gr),dim3(Bl), 0, 0, A,dst,d);
}
/*
* CuVector
*/
void cudaD_add_row_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
hipLaunchKernelGGL(( _add_row_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaD_add_col_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
hipLaunchKernelGGL(( _add_col_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) {
hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d);
}
/*
* cu::
*/
void cudaD_relu (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d) {
hipLaunchKernelGGL(( _relu), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaD_diff_relu (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d) {
hipLaunchKernelGGL(( _diff_relu), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d);
}
void cudaD_softrelu (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d) {
hipLaunchKernelGGL(( _softrelu), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaD_diff_softrelu (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* x, MatrixDim d) {
hipLaunchKernelGGL(( _diff_softrelu), dim3(Gr),dim3(Bl), 0, 0, eout, e, x, d);
}
void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d) {
hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d) {
hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d);
}
void cudaD_softmax (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d) {
hipLaunchKernelGGL(( _softmax), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaD_softmax_part(dim3 Gr, dim3 Bl, const double* X, const int32_cuda* vec_ids, double* Y, MatrixDim d) {
hipLaunchKernelGGL(( _softmax_part), dim3(Gr),dim3(Bl), 0, 0, X,vec_ids,Y,d);
}
void cudaD_expand(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _expand), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in);
}
void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d) {
hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d);
}
void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d);
}
void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) {
hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d);
}
| bb39c179e4218aaa66105dac59435d09f577fabf.cu | // cudamatrix/cu-kernels.cu
// Copyright 2009-2012 Karel Vesely
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
// This file contains the CUDA code of the CUDA kernels, plus the ANSI-C wrappers
#include <cfloat>
#include "cu-kernels-ansi.h"
/***********************************************************************
* Generic __device__ functions
*/
template<typename Real>
__device__
static Real _sum_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (sum)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = 0.0;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = buffer[threadIdx.x + halfPoint];
}
buffer[threadIdx.x] += temp;
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
template<typename Real>
__device__
static Real _max_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (max)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = -1e20;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = buffer[threadIdx.x + halfPoint];
}
if (temp > buffer[threadIdx.x]) buffer[threadIdx.x] = temp;
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
template<typename Real>
__device__
static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (get index of maximum)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = -1e20;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = val[idx[threadIdx.x + halfPoint]];
}
if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint];
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return idx[0];
}
/***********************************************************************
* CUDA kernels
* the functions are templated to have the float/double operations
*/
/*
* CuMatrix
*/
template<typename Real>
__global__
static void _set_const(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = value;
}
template<typename Real>
__global__
static void _binarize(Real* mat, Real thres, MatrixDim d){
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if( i < d.cols && j < d.rows )
{
if(mat[index]>thres) mat[index]=1.0;
else mat[index]=0.0;
}
}
template<typename Real>
__global__
static void _add_const(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = mat[index] + value;
}
template<typename Real>
__global__
static void _power(Real* mat, Real pw, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = pow(mat[index], pw);
}
template<typename Real>
__global__
static void _scale(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = mat[index] * value;
}
template<typename Real>
__global__
static void _apply_floor(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows ) {
if (mat[index] < value) mat[index] = value;
}
}
template<typename Real>
__global__
static void _apply_truncate(Real* mat, Real low, Real high, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows ) {
if (mat[index] < low) mat[index] = low;
if (mat[index] > high) mat[index] = high;
}
}
template<typename Real>
__global__
static void _apply_log(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = log(mat[index]);
}
template<typename Real>
__global__
static void _apply_exp(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = exp(mat[index]);
}
template<typename Real>
__global__
static void _mul_elements(Real* mat, const Real* A, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = mat[index] * A[index];
}
template<typename Real>
__global__
static void _div_elements(Real* mat, const Real* A, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = mat[index] / A[index];
}
template<typename Real>
__global__
static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] *= scale[i];
}
template<typename Real>
__global__
static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] *= scale[j];
}
template<typename Real>
__global__
static void _div_cols_vec(Real* mat, const Real* vec_div, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i >= d.cols ) return;
//invert divider in shared memory
__shared__ Real inv[16];
if(threadIdx.y==0) {
inv[threadIdx.x] = 1.0/vec_div[i];
}
__syncthreads();
//multiply elements
if ( i < d.cols && j < d.rows )
mat[index] *= inv[threadIdx.x];
}
template<typename Real>
__global__
static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( j >= d.rows ) return;
//invert divider in shared memory
__shared__ Real inv[16];
if(threadIdx.x==0) {
inv[threadIdx.y] = 1.0/vec_div[j];
}
__syncthreads();
//multiply elements
if ( i < d.cols && j < d.rows )
mat[index] *= inv[threadIdx.y];
}
template<typename Real>
__global__
static void _add_mat(Real alpha, const Real* A, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
dst[index] = alpha*A[index] + beta*dst[index];
}
template<typename Real>
__global__
static void _part_add_mat(Real alpha, const Real* A, MatrixDim da, int32_cuda ro, int32_cuda co, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
int32_cuda ai = i - co;
int32_cuda aj = j - ro;
int32_cuda A_index = ai + aj * da.stride;
if ( i < d.cols && j < d.rows && ai >=0 && ai <da.cols && aj >=0 && aj < da.rows)
dst[index] = alpha*A[A_index] + beta*dst[index];
}
template<typename Real>
__global__
static void _log_add_exp_mat(const Real* A, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows ) {
// find max
double fa = A[index];
double fb = dst[index];
double max, sum;
if(fa > fb) {
sum = 1.0 + exp(fb - fa);
max=fa;
}else{
sum = exp(fa-fb) + 1.0;
max=fb;
}
dst[index] = max + log(sum);
}
}
template<typename Real>
__global__
static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
dst[index] = alpha*col[j] + beta*dst[index];
}
template<typename Real>
__global__
static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
dst[index] = alpha*row[i] + beta*dst[index];
}
template<typename Real>
__global__
static void _add_vec_to_partial_rows(Real alpha, int32_cuda offset, const Real* row, int32_cuda dim, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
int32_cuda vec_index = i - offset;
if ( vec_index >= 0 && vec_index < dim && i < d.cols && j < d.rows )
dst[index] = alpha*row[vec_index] + beta*dst[index];
}
template<typename Real>
__global__
static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*dmat.stride;
int32_cuda index2 = i + j*dmask.stride;
if ( i < dmat.cols && j < dmat.rows )
if(mask[index2] == 0) mat[index] = 0;
}
/*
* CuVector
*/
template<typename Real>
__global__
static void _add_row_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y; //col
int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; //row
if(blockIdx.x > 0) return;
if(blockDim.y > 1) return;
__shared__ Real row_data[256];
//copy the input to row_data
row_data[j] = mat[i+j*d.stride];
__syncthreads();
//get the sum
Real sum = _sum_reduce(row_data);
__syncthreads();
//add to previously accumulated sum
if(threadIdx.x == 0)
vec_sum[i] += sum;
}
template<typename Real>
__global__
static void _add_col_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; //row
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; //col
if(blockIdx.x > 0) return;
if(blockDim.y > 1) return;
__shared__ Real row_data[256];
//copy the input to row_data
row_data[i] = mat[i+j*d.stride];
__syncthreads();
//get the sum
Real sum = _sum_reduce(row_data);
__syncthreads();
//add to previously accumulated sum
if(threadIdx.x == 0)
vec_sum[j] += sum;
}
template<typename Real>
__global__
static void _invert_elements(Real* data, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
data[index] = 1.0/data[index];
}
/*
* cu::
*/
template<typename Real>
__global__
static void _relu(Real*y, const Real*x, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows ) {
Real res = (x[index] > 0.0) ? x[index] : 0.0;
y[index] = res;
}
}
template<typename Real>
__global__
static void _diff_relu(Real*eout, const Real*e, const Real*y, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows )
eout[index] = (y[index] > 0.0) * e[index];
}
template<typename Real>
__global__
static void _softrelu(Real*y, const Real*x, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows ) {
Real res = (x[index] > 4.0) ? x[index] : log(1.0+exp(x[index]));
y[index] = res;
}
}
template<typename Real>
__global__
static void _diff_softrelu(Real*eout, const Real*e, const Real*x, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows ) {
if(x[index]>4.0){
eout[index] = e[index];
}else{
Real f = exp(x[index]);
eout[index] = e[index] * f / (1+f);
}
}
}
template<typename Real>
__global__
static void _sigmoid(Real*y, const Real*x, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows ) {
Real res = 1.0 / (1.0 + exp(-x[index]));
y[index] = res;
}
}
template<typename Real>
__global__
static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows )
eout[index] = y[index]*(1.0-y[index]) * e[index];
}
template<typename Real>
__global__
static void _softmax(Real*y, const Real*x, MatrixDim d) {
int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x;
if(j >= d.rows) return;
//copy to output and find max...
double max = -1e20;
double sum = 0.0;
for(int32_cuda i=0; i<d.cols; i++) {
if(max < x[i+j*d.stride]) max = x[i+j*d.stride];
y[i+j*d.stride] = x[i+j*d.stride];
}
//subtract max, apply exp, sum up...
for(int32_cuda i=0; i<d.cols; i++) {
y[i+j*d.stride] = exp(y[i+j*d.stride] - max);
sum += y[i+j*d.stride];
}
//normalize by sum...
for(int32_cuda i=0; i<d.cols; i++) {
y[i+j*d.stride] /= sum;
}
}
template<typename Real>
__global__
static void _expand(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d_out.stride;
if( i < d_out.cols && j < d_out.rows ) {
int32_cuda src_col = i % d_in.cols;
int32_cuda src_row = j + off[i / d_in.cols];
if(src_row < 0) src_row = 0;
if(src_row >= d_in.rows) src_row = d_in.rows-1;
y[index] = x[src_col + src_row*d_in.stride];
}
}
template<typename Real>
__global__
static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d_out.stride;
if( i < d_out.cols && j < d_out.rows ) {
int32_cuda src_col = copy_from[i];
if(src_col >= 0 && src_col < d_in.cols) {
y[index] = x[src_col + j*d_in.stride];
} else {
y[index] = 1.0/0.0;
}
}
}
template<typename Real>
__global__
static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d_out.stride;
if( i < d_out.cols && j < d_out.rows ) {
int32_cuda src_row = copy_from[j];
y[index] = x[i + src_row*d_in.stride];
}
}
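// L1 regularization with zero-crossing clipping: the update wei - lr*grad - l1_signed is
// simulated and, if it would flip the sign of the weight, both the weight and its gradient
// are zeroed; otherwise the weight is shrunk by l1_signed towards zero. Zero weights are skipped.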
template<typename Real>
__global__
static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows ) {
if(wei[index]==0.0) return; //skip L1 if zero weight!
Real l1_signed = l1;
if(wei[index] < 0.0) //flip sign
l1_signed = -l1;
Real before = wei[index];
    Real after = wei[index] - lr*grad[index] - l1_signed; //simulate update
if((after > 0.0) ^ (before > 0.0)) { //sign changed?
wei[index] = 0.0;
grad[index] = 0.0;
} else {
wei[index] -= l1_signed;
}
}
}
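// per-row argmax using a shared-memory reduction; the shared arrays hold one slot per
// thread (256 at most) and each thread loads mat[threadIdx.x + j*d.stride] without a column
// check, so the launch is assumed to use one block in x, blockDim.y == 1 and blockDim.x <= 256.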
template<typename Real>
__global__
static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if(blockIdx.x > 0) return;
if(blockDim.y != 1) return;
__shared__ Real value[256];
__shared__ int32_cuda index[256];
//copy to shared memory
value[threadIdx.x] = mat[i+j*d.stride];
index[threadIdx.x] = threadIdx.x;
__syncthreads();
//get the id of the max value
int32_cuda out_max = _max_id_reduce(value,index);
__syncthreads();
  //see if it's a bigger value
if(threadIdx.x == 0) {
if(vec_val[j] <= mat[out_max+j*d.stride]) {
vec_val[j] = mat[out_max+j*d.stride];
vec_id[j] = voff+out_max;
}
}
}
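// cross-entropy backprop for a softmax output with one-hot targets: stores the log of the
// posterior of the target class in vec_log_post and subtracts 1.0 from that element of
// mat_net_out, turning the posteriors into the error derivative in place.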
template<typename Real>
__global__
static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if(i>0) return;
if(j<d.rows) {
int32_cuda index = vec_tgt[j] + j*d.stride;
Real value = mat_net_out[index];
if(value < 1e-20) value = 1e-20;
vec_log_post[j] = log(value);
mat_net_out[index] -= 1.0;
}
}
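// partial softmax: Y = exp(X - X[:, vec_ids[row]]), i.e. each row is shifted by one of its
// own elements (typically the row maximum) before exponentiation; normalization by the row
// sum is done elsewhere.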
template<typename Real>
__global__
static void _softmax_part(const Real* X, const int32_cuda* vec_ids, Real* Y, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows ) {
Real tmp = X[index] - X[vec_ids[j] + j*d.stride];
Y[index] = exp(tmp);
}
}
/***********************************************************************
* ANSI-C wrappers of CUDA kernels
*/
/*
* "int32"
*/
void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) {
_set_const<<<Gr,Bl>>>(mat,value,d);
}
/*
* "float"
*/
/*
* CuMatrix
*/
void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
_set_const<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_binarize(dim3 Gr, dim3 Bl, float* mat, float thres, MatrixDim d) {
_binarize<<<Gr,Bl>>>(mat,thres,d);
}
void cudaF_add_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
_add_const<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_power(dim3 Gr, dim3 Bl, float* mat, float pow, MatrixDim d) {
_power<<<Gr,Bl>>>(mat,pow,d);
}
void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
_scale<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
_apply_floor<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_apply_truncate(dim3 Gr, dim3 Bl, float* mat, float low, float high, MatrixDim d) {
_apply_truncate<<<Gr,Bl>>>(mat,low,high,d);
}
void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
_apply_log<<<Gr,Bl>>>(mat,d);
}
void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
_apply_exp<<<Gr,Bl>>>(mat,d);
}
void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim d) {
_mul_elements<<<Gr,Bl>>>(mat,A,d);
}
void cudaF_div_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim d) {
_div_elements<<<Gr,Bl>>>(mat,A,d);
}
void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) {
_mul_cols_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) {
_mul_rows_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaF_div_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) {
_div_cols_vec<<<Gr,Bl>>>(mat, vec_div, d);
}
void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) {
_div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d);
}
void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* A, float beta, float* dst, MatrixDim d) {
_add_mat<<<Gr,Bl>>>(alpha,A,beta,dst,d);
}
void cudaF_part_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* A, MatrixDim da, int32_cuda ro, int32_cuda co, float beta, float* dst, MatrixDim d) {
_part_add_mat<<<Gr,Bl>>>(alpha,A,da,ro,co,beta,dst,d);
}
void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) {
_add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d);
}
void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) {
_add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d);
}
void cudaF_add_vec_to_partial_rows(dim3 Gr, dim3 Bl, float alpha, int32_cuda offset, const float* row, int32_cuda dim, float beta, float* dst, MatrixDim d) {
_add_vec_to_partial_rows<<<Gr,Bl>>>(alpha,offset,row,dim,beta,dst,d);
}
// CURRENTLY UNUSED...
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
_apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask);
}
void cudaF_log_add_exp_mat(dim3 Gr, dim3 Bl, const float* A, float* dst, MatrixDim d) {
_log_add_exp_mat<<<Gr,Bl>>>(A,dst,d);
}
/*
* CuVector
*/
void cudaF_add_row_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
_add_row_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaF_add_col_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
_add_col_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) {
_invert_elements<<<Gr,Bl>>>(data, d);
}
/*
* cu::
*/
void cudaF_relu (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d) {
_relu<<<Gr,Bl>>>(y, x, d);
}
void cudaF_diff_relu (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d) {
_diff_relu<<<Gr,Bl>>>(eout, e, y, d);
}
void cudaF_softrelu (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d) {
_softrelu<<<Gr,Bl>>>(y, x, d);
}
void cudaF_diff_softrelu (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* x, MatrixDim d) {
_diff_softrelu<<<Gr,Bl>>>(eout, e, x, d);
}
void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d) {
_sigmoid<<<Gr,Bl>>>(y, x, d);
}
void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d) {
_diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d);
}
void cudaF_softmax (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d) {
_softmax<<<Gr,Bl>>>(y, x, d);
}
void cudaF_softmax_part(dim3 Gr, dim3 Bl, const float* X, const int32_cuda* vec_ids, float* Y, MatrixDim d) {
_softmax_part<<<Gr,Bl>>>(X,vec_ids,Y,d);
}
void cudaF_expand(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
_expand<<<Gr,Bl>>>(y,x,off,d_out,d_in);
}
void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d) {
_regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d);
}
void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
_find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d);
}
void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) {
_diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d);
}
/*
* "double"
*/
/*
* CuMatrix
*/
void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
_set_const<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_binarize(dim3 Gr, dim3 Bl, double* mat, double thres, MatrixDim d) {
_binarize<<<Gr,Bl>>>(mat,thres,d);
}
void cudaD_add_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
_add_const<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_power(dim3 Gr, dim3 Bl, double* mat, double pow, MatrixDim d) {
_power<<<Gr,Bl>>>(mat,pow,d);
}
void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
_scale<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
_apply_floor<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_apply_truncate(dim3 Gr, dim3 Bl, double* mat, double low, double high, MatrixDim d) {
_apply_truncate<<<Gr,Bl>>>(mat,low,high,d);
}
void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
_apply_log<<<Gr,Bl>>>(mat,d);
}
void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
_apply_exp<<<Gr,Bl>>>(mat,d);
}
void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim d) {
_mul_elements<<<Gr,Bl>>>(mat,A,d);
}
void cudaD_div_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim d) {
_div_elements<<<Gr,Bl>>>(mat,A,d);
}
void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
_mul_cols_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
_mul_rows_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaD_div_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) {
_div_cols_vec<<<Gr,Bl>>>(mat, vec_div, d);
}
void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) {
_div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d);
}
void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* A, double beta, double* dst, MatrixDim d) {
_add_mat<<<Gr,Bl>>>(alpha,A,beta,dst,d);
}
void cudaD_prat_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* A, MatrixDim da, int32_cuda ro, int32_cuda co, double beta, double* dst, MatrixDim d){
_part_add_mat<<<Gr,Bl>>>(alpha,A,da,ro,co,beta,dst,d);
}
void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) {
_add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d);
}
void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) {
_add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d);
}
void cudaD_add_vec_to_partial_rows(dim3 Gr, dim3 Bl, double alpha, int32_cuda offset, const double* row, int32_cuda dim, double beta, double* dst, MatrixDim d){
_add_vec_to_partial_rows<<<Gr,Bl>>>(alpha,offset,row,dim,beta,dst,d);
}
// CURRENTLY UNUSED...
void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
_apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask);
}
void cudaD_log_add_exp_mat(dim3 Gr, dim3 Bl, const double* A, double* dst, MatrixDim d) {
_log_add_exp_mat<<<Gr,Bl>>>(A,dst,d);
}
/*
* CuVector
*/
void cudaD_add_row_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
_add_row_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaD_add_col_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
_add_col_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) {
_invert_elements<<<Gr,Bl>>>(data, d);
}
/*
* cu::
*/
void cudaD_relu (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d) {
_relu<<<Gr,Bl>>>(y, x, d);
}
void cudaD_diff_relu (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d) {
_diff_relu<<<Gr,Bl>>>(eout, e, y, d);
}
void cudaD_softrelu (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d) {
_softrelu<<<Gr,Bl>>>(y, x, d);
}
void cudaD_diff_softrelu (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* x, MatrixDim d) {
_diff_softrelu<<<Gr,Bl>>>(eout, e, x, d);
}
void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d) {
_sigmoid<<<Gr,Bl>>>(y, x, d);
}
void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d) {
_diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d);
}
void cudaD_softmax (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d) {
_softmax<<<Gr,Bl>>>(y, x, d);
}
void cudaD_softmax_part(dim3 Gr, dim3 Bl, const double* X, const int32_cuda* vec_ids, double* Y, MatrixDim d) {
_softmax_part<<<Gr,Bl>>>(X,vec_ids,Y,d);
}
void cudaD_expand(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
_expand<<<Gr,Bl>>>(y,x,off,d_out,d_in);
}
void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d) {
_regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d);
}
void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
_find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d);
}
void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) {
_diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d);
}
|
fda8cb9517f45dde2cae9f397310f059b489264c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gputimer.h"
const int N= 1024; // matrix size is NxN
const int K= 32; // tile size is KxK
// Utility functions: compare, print, and fill matrices
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line)
{
if (err != hipSuccess) {
fprintf(stderr, "CUDA error at: %s : %d\n", file,line);
fprintf(stderr, "%s %s\n", hipGetErrorString(err), func);;
exit(1);
}
}
int compare_matrices(float *gpu, float *ref)
{
int result = 0;
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
if (ref[i + j*N] != gpu[i + j*N])
{
// printf("reference(%d,%d) = %f but test(%d,%d) = %f\n",
// i,j,ref[i+j*N],i,j,test[i+j*N]);
result = 1;
}
return result;
}
void print_matrix(float *mat)
{
for(int j=0; j < N; j++)
{
for(int i=0; i < N; i++) { printf("%4.4g ", mat[i + j*N]); }
printf("\n");
}
}
// fill a matrix with sequential numbers in the range 0..N-1
void fill_matrix(float *mat)
{
for(int j=0; j < N * N; j++)
mat[j] = (float) j;
}
void
transpose_CPU(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched on a single thread
__global__ void
transpose_serial(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per row of output matrix
__global__ void
transpose_parallel_per_row(float in[], float out[])
{
int i = threadIdx.x;
for(int j=0; j < N; j++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per element, in KxK threadblocks
// thread (x,y) in grid writes element (i,j) of output matrix
__global__ void
transpose_parallel_per_element(float in[], float out[])
{
int i = blockIdx.x * K + threadIdx.x;
int j = blockIdx.y * K + threadIdx.y;
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks
// thread blocks read & write tiles, in coalesced fashion
// adjacent threads read adjacent input elements, write adjacent output elmts
__global__ void
transpose_parallel_per_element_tiled(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[K][K];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks
// thread blocks read & write tiles, in coalesced fashion
// adjacent threads read adjacent input elements, write adjacent output elmts
__global__ void
transpose_parallel_per_element_tiled16(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16;
int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[16][16];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in KxK threadblocks
// thread blocks read & write tiles, in coalesced fashion
// shared memory array padded to avoid bank conflicts
__global__ void
transpose_parallel_per_element_tiled_padded(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[K][K+1];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in KxK threadblocks
// thread blocks read & write tiles, in coalesced fashion
// shared memory array padded to avoid bank conflicts
__global__ void
transpose_parallel_per_element_tiled_padded16(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16;
int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[16][16+1];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
int main(int argc, char **argv)
{
int numbytes = N * N * sizeof(float);
float *in = (float *) malloc(numbytes);
float *out = (float *) malloc(numbytes);
float *gold = (float *) malloc(numbytes);
fill_matrix(in);
transpose_CPU(in, gold);
float *d_in, *d_out;
hipMalloc(&d_in, numbytes);
hipMalloc(&d_out, numbytes);
hipMemcpy(d_in, in, numbytes, hipMemcpyHostToDevice);
GpuTimer timer;
/*
* Now time each kernel and verify that it produces the correct result.
*
	 * To be really careful about benchmarking, we should run every kernel once
* to "warm" the system and avoid any compilation or code-caching effects, then run
* every kernel 10 or 100 times and average the timings to smooth out any variance.
* But this makes for messy code and our goal is teaching, not detailed benchmarking.
*/
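	/*
	 * A minimal warm-up sketch (deliberately left disabled, in line with the note above);
	 * the kernel chosen for the throwaway launch is just an example:
	 *
	 *   transpose_parallel_per_row<<<1,N>>>(d_in, d_out);  // untimed launch to warm up the GPU
	 *   hipDeviceSynchronize();                            // let it finish before any timed run
	 */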
/*
timer.Start();
transpose_serial<<<1,1>>>(d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_serial: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
*/
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_row), dim3(1),dim3(N), 0, 0, d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_row: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
dim3 blocks(N/K,N/K); // blocks per grid
dim3 threads(K,K); // threads per block
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element), dim3(blocks),dim3(threads), 0, 0, d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element_tiled), dim3(blocks),dim3(threads), 0, 0, d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n",
K, K, timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
dim3 blocks16x16(N/16,N/16); // blocks per grid
dim3 threads16x16(16,16); // threads per block
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element_tiled16), dim3(blocks16x16),dim3(threads16x16), 0, 0, d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled 16x16: %g ms.\nVerifying ...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element_tiled_padded16), dim3(blocks16x16),dim3(threads16x16), 0, 0, d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled_padded 16x16: %g ms.\nVerifying...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
hipFree(d_in);
hipFree(d_out);
}
| fda8cb9517f45dde2cae9f397310f059b489264c.cu | #include <stdio.h>
#include "gputimer.h"
const int N= 1024; // matrix size is NxN
const int K= 32; // tile size is KxK
// Utility functions: compare, print, and fill matrices
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line)
{
if (err != cudaSuccess) {
fprintf(stderr, "CUDA error at: %s : %d\n", file,line);
fprintf(stderr, "%s %s\n", cudaGetErrorString(err), func);;
exit(1);
}
}
int compare_matrices(float *gpu, float *ref)
{
int result = 0;
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
if (ref[i + j*N] != gpu[i + j*N])
{
// printf("reference(%d,%d) = %f but test(%d,%d) = %f\n",
// i,j,ref[i+j*N],i,j,test[i+j*N]);
result = 1;
}
return result;
}
void print_matrix(float *mat)
{
for(int j=0; j < N; j++)
{
for(int i=0; i < N; i++) { printf("%4.4g ", mat[i + j*N]); }
printf("\n");
}
}
// fill a matrix with sequential numbers in the range 0..N-1
void fill_matrix(float *mat)
{
for(int j=0; j < N * N; j++)
mat[j] = (float) j;
}
void
transpose_CPU(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched on a single thread
__global__ void
transpose_serial(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per row of output matrix
__global__ void
transpose_parallel_per_row(float in[], float out[])
{
int i = threadIdx.x;
for(int j=0; j < N; j++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per element, in KxK threadblocks
// thread (x,y) in grid writes element (i,j) of output matrix
__global__ void
transpose_parallel_per_element(float in[], float out[])
{
int i = blockIdx.x * K + threadIdx.x;
int j = blockIdx.y * K + threadIdx.y;
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks
// thread blocks read & write tiles, in coalesced fashion
// adjacent threads read adjacent input elements, write adjacent output elmts
__global__ void
transpose_parallel_per_element_tiled(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[K][K];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks
// thread blocks read & write tiles, in coalesced fashion
// adjacent threads read adjacent input elements, write adjacent output elmts
__global__ void
transpose_parallel_per_element_tiled16(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16;
int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[16][16];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in KxK threadblocks
// thread blocks read & write tiles, in coalesced fashion
// shared memory array padded to avoid bank conflicts
__global__ void
transpose_parallel_per_element_tiled_padded(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[K][K+1];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in KxK threadblocks
// thread blocks read & write tiles, in coalesced fashion
// shared memory array padded to avoid bank conflicts
__global__ void
transpose_parallel_per_element_tiled_padded16(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16;
int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[16][16+1];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
int main(int argc, char **argv)
{
int numbytes = N * N * sizeof(float);
float *in = (float *) malloc(numbytes);
float *out = (float *) malloc(numbytes);
float *gold = (float *) malloc(numbytes);
fill_matrix(in);
transpose_CPU(in, gold);
float *d_in, *d_out;
cudaMalloc(&d_in, numbytes);
cudaMalloc(&d_out, numbytes);
cudaMemcpy(d_in, in, numbytes, cudaMemcpyHostToDevice);
GpuTimer timer;
/*
* Now time each kernel and verify that it produces the correct result.
*
	 * To be really careful about benchmarking, we should run every kernel once
* to "warm" the system and avoid any compilation or code-caching effects, then run
* every kernel 10 or 100 times and average the timings to smooth out any variance.
* But this makes for messy code and our goal is teaching, not detailed benchmarking.
*/
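	/*
	 * A minimal warm-up sketch (deliberately left disabled, in line with the note above);
	 * the kernel chosen for the throwaway launch is just an example:
	 *
	 *   transpose_parallel_per_row<<<1,N>>>(d_in, d_out);  // untimed launch to warm up the GPU
	 *   cudaDeviceSynchronize();                           // let it finish before any timed run
	 */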
/*
timer.Start();
transpose_serial<<<1,1>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_serial: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
*/
timer.Start();
transpose_parallel_per_row<<<1,N>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_row: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
dim3 blocks(N/K,N/K); // blocks per grid
dim3 threads(K,K); // threads per block
timer.Start();
transpose_parallel_per_element<<<blocks,threads>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
timer.Start();
transpose_parallel_per_element_tiled<<<blocks,threads>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n",
K, K, timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
dim3 blocks16x16(N/16,N/16); // blocks per grid
dim3 threads16x16(16,16); // threads per block
timer.Start();
transpose_parallel_per_element_tiled16<<<blocks16x16,threads16x16>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled 16x16: %g ms.\nVerifying ...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
timer.Start();
transpose_parallel_per_element_tiled_padded16<<<blocks16x16,threads16x16>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled_padded 16x16: %g ms.\nVerifying...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
cudaFree(d_in);
cudaFree(d_out);
}
|
b3c07f7dcd7b9d60e912cd31076a33e855b250f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zgeaxpy.cu, normal z -> s, Wed Jan 2 14:18:53 2019
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
sgeaxpy_kernel(
int num_rows,
int num_cols,
float alpha,
float * dx,
float beta,
float * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
int idx = row + j*num_rows;
dy[ idx ] = alpha * dx[ idx ] + beta * dy[ idx ];
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * X + beta * Y on the GPU.
    The input format is magma_s_matrix. It can handle both
    dense matrices (vector blocks) and CSR matrices. For the latter,
it interfaces the cuSPARSE library.
Arguments
---------
@param[in]
alpha float
scalar multiplier.
@param[in]
X magma_s_matrix
                input matrix X.
@param[in]
beta float
scalar multiplier.
@param[in,out]
Y magma_s_matrix*
                input/output matrix Y.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C"
magma_int_t
magma_sgeaxpy(
float alpha,
magma_s_matrix X,
float beta,
magma_s_matrix *Y,
magma_queue_t queue )
{
int m = X.num_rows;
int n = X.num_cols;
magma_s_matrix C={Magma_CSR};
if( X.storage_type == Magma_DENSE && Y->storage_type == Magma_DENSE ){
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( sgeaxpy_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, X.dval, beta, Y->dval );
} else if( X.storage_type == Magma_CSR && Y->storage_type == Magma_CSR ) {
magma_scuspaxpy( &alpha, X, &beta, *Y, &C, queue );
magma_smfree( Y, queue );
magma_smtransfer( C, Y, Magma_DEV, Magma_DEV, queue );
magma_smfree( &C, queue );
} else {
printf("%% error: matrix addition only supported for DENSE and CSR format.\n");
}
return MAGMA_SUCCESS;
}
| b3c07f7dcd7b9d60e912cd31076a33e855b250f4.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zgeaxpy.cu, normal z -> s, Wed Jan 2 14:18:53 2019
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
sgeaxpy_kernel(
int num_rows,
int num_cols,
float alpha,
float * dx,
float beta,
float * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
int idx = row + j*num_rows;
dy[ idx ] = alpha * dx[ idx ] + beta * dy[ idx ];
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * X + beta * Y on the GPU.
    The input format is magma_s_matrix. It can handle both
    dense matrices (vector blocks) and CSR matrices. For the latter,
it interfaces the cuSPARSE library.
Arguments
---------
@param[in]
alpha float
scalar multiplier.
@param[in]
X magma_s_matrix
                input matrix X.
@param[in]
beta float
scalar multiplier.
@param[in,out]
Y magma_s_matrix*
                input/output matrix Y.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C"
magma_int_t
magma_sgeaxpy(
float alpha,
magma_s_matrix X,
float beta,
magma_s_matrix *Y,
magma_queue_t queue )
{
int m = X.num_rows;
int n = X.num_cols;
magma_s_matrix C={Magma_CSR};
if( X.storage_type == Magma_DENSE && Y->storage_type == Magma_DENSE ){
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
sgeaxpy_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, alpha, X.dval, beta, Y->dval );
} else if( X.storage_type == Magma_CSR && Y->storage_type == Magma_CSR ) {
magma_scuspaxpy( &alpha, X, &beta, *Y, &C, queue );
magma_smfree( Y, queue );
magma_smtransfer( C, Y, Magma_DEV, Magma_DEV, queue );
magma_smfree( &C, queue );
} else {
printf("%% error: matrix addition only supported for DENSE and CSR format.\n");
}
return MAGMA_SUCCESS;
}
|
36b18d8eb7c0ec3c045e57a9acfaed72649c87d6.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "kernels_hip.cuh"
#include "device_launch_parameters.h"
#include "device_helpers.hpp"
namespace emida
{
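	// Direct 2D cross-correlation: for every shift s in the result window,
	// res[s] = sum over the overlapping region of pics[p + s - r] * ref[p], where
	// r = (res_size - 1) / 2 is the window radius. One thread produces one output element
	// (per batch iteration); each slice is one picture/reference pair, and every batch
	// iteration advances to the next group of ref_slices pictures while reusing the same references.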
template<typename T, typename RES>
__global__ void cross_corr(
const T* __restrict__ pics,
const T* __restrict__ ref,
RES* __restrict__ res,
size2_t size,
size2_t res_size,
esize_t ref_slices,
esize_t batch_size)
{
esize_t whole_x = blockIdx.x * blockDim.x + threadIdx.x;
esize_t cuda_y = blockIdx.y * blockDim.y + threadIdx.y;
//number of picture that this thread computes
esize_t slice_num = whole_x / res_size.x;
if (slice_num >= ref_slices || cuda_y >= res_size.y)
return;
size2_t slice_pos = { whole_x % res_size.x, cuda_y };
esize_t ref_num = slice_num % ref_slices;
size2_t r = (res_size - 1) / 2;
vec2<int> shift = { (int)slice_pos.x - (int)r.x, (int)slice_pos.y - (int)r.y };
ref += ref_num * size.area();
pics += slice_num * size.area();
res += slice_num * res_size.area();
for (esize_t i = 0; i < batch_size; ++i)
{
esize_t x_end = min(size.x - shift.x, size.x);
esize_t y_end = min(size.y - shift.y, size.y);
RES sum = 0;
for (esize_t y = max(-shift.y, 0); y < y_end; ++y)
{
for (esize_t x = max(-shift.x, 0); x < x_end; ++x)
{
int x_shifted = x + shift.x;
int y_shifted = y + shift.y;
sum += pics[y_shifted * size.x + x_shifted] * ref[y * size.x + x];
}
}
res[slice_pos.pos(res_size.x)] = sum;
pics += ref_slices * size.area();
res += ref_slices * res_size.area();
}
}
template<typename T, typename RES>
void run_cross_corr(const T* pic_a, const T* pic_b, RES* res, size2_t size, size2_t res_size, esize_t ref_slices, esize_t batch_size)
{
dim3 block_size(16, 16);
dim3 grid_size(div_up(res_size.x * ref_slices, block_size.x), div_up(res_size.y, block_size.y));
hipLaunchKernelGGL(( cross_corr<T, RES>) , dim3(grid_size), dim3(block_size), 0, 0, pic_a, pic_b, res, size, res_size, ref_slices, batch_size);
}
template void run_cross_corr<int, int>(
const int*,
const int*,
int* res,
size2_t size,
size2_t res_size,
esize_t,
esize_t);
template void run_cross_corr<double, double>(
const double*,
const double*,
double* res,
size2_t size,
size2_t res_size,
esize_t,
esize_t);
template void run_cross_corr<float, float>(
const float*,
const float*,
float* res,
size2_t size,
size2_t res_size,
esize_t,
esize_t);
//*******************************************************************************************************************************************************
// Following are experiments with optimization of cross correlation, currently unused
template<int k, typename T, typename RES>
__device__ __inline__ void compute(const T* __restrict__ pics, const T* __restrict__ ref, RES* __restrict__ res, size2_t size, size2_t res_size, size2_t slice_pos, vec2<int> shift)
{
esize_t x_end = shift.x < 0 ? size.x : size.x - shift.x;
esize_t y_end = shift.y < 0 ? size.y : size.y - shift.y;
//control flow divergency in following fors??
RES sum[k];
#pragma unroll
for (esize_t i = 0; i < k; ++i)
sum[i] = 0;
for (esize_t y = shift.y >= 0 ? 0 : -shift.y; y < y_end; ++y)
{
for (esize_t x = shift.x >= 0 ? 0 : -shift.x; x < x_end; ++x)
{
int x_shifted = x + shift.x;
int y_shifted = y + shift.y;
for (esize_t i = 0; i < k; ++i)
sum[i] += pics[y_shifted * size.x + x_shifted + i] * ref[y * size.x + x];
}
}
#pragma unroll
for (esize_t i = 0; i < k; ++i)
res[slice_pos.y * res_size.x + slice_pos.x + i] = sum[i];
}
template<>
__device__ __inline__ void compute<2, double, double>(const double* __restrict__ pics, const double* __restrict__ ref, double* __restrict__ res, size2_t size, size2_t res_size, size2_t slice_pos, vec2<int> shift)
{
esize_t x_start = shift.x < 0 ? -shift.x : 0;
esize_t x_end = shift.x < 0 ? size.x : size.x - shift.x;
esize_t y_end = shift.y < 0 ? size.y : size.y - shift.y;
//control flow divergency in following fors??
double sum[2];
sum[0] = 0;
sum[1] = 0;
for (esize_t y = shift.y >= 0 ? 0 : -shift.y; y < y_end; ++y)
{
int y_shifted = y + shift.y;
double cach[2];
cach[0] = pics[y_shifted * size.x + x_start + shift.x];
int x_shifted = x_start + shift.x;
for (esize_t x = x_start; x < x_end; x+=2)
{
++x_shifted;
cach[1] = pics[y_shifted * size.x + x_shifted];
sum[0] += cach[0] * ref[y * size.x + x];
sum[1] += cach[1] * ref[y * size.x + x];
++x_shifted;
cach[0] = pics[y_shifted * size.x + x_shifted];
sum[0] += cach[1] * ref[y * size.x + x + 1];
sum[1] += cach[0] * ref[y * size.x + x + 1];
}
}
res[slice_pos.y * res_size.x + slice_pos.x] = sum[0];
res[slice_pos.y * res_size.x + slice_pos.x + 1] = sum[1];
}
template<>
__device__ __inline__ void compute<3, double, double>(const double* __restrict__ pics, const double* __restrict__ ref, double* __restrict__ res, size2_t size, size2_t res_size, size2_t slice_pos, vec2<int> shift)
{
esize_t x_end = shift.x < 0 ? size.x : size.x - shift.x;
esize_t y_end = shift.y < 0 ? size.y : size.y - shift.y;
//control flow divergency in following fors??
double sum[3];
sum[0] = 0;
sum[1] = 0;
		sum[2] = 0;
esize_t x_start = shift.x >= 0 ? 0 : -shift.x;
for (esize_t y = shift.y >= 0 ? 0 : -shift.y; y < y_end; ++y)
{
int y_shifted = y + shift.y;
double cach[3];
cach[0] = pics[y_shifted * size.x + x_start + shift.x];
cach[1] = pics[y_shifted * size.x + x_start + shift.x + 1];
int x_shifted = x_start + shift.x;
for (esize_t x = x_start; x < x_end; x += 3)
{
++x_shifted;
cach[2] = pics[y_shifted * size.x + x_shifted];
//sum[0] += pics[y_shifted * size.x + x_shifted] * ref[y * size.x + x];
//sum[1] += pics[y_shifted * size.x + x_shifted + 1] * ref[y * size.x + x];
sum[0] += cach[0] * ref[y * size.x + x];
sum[1] += cach[1] * ref[y * size.x + x];
sum[2] += cach[2] * ref[y * size.x + x];
++x_shifted;
cach[0] = pics[y_shifted * size.x + x_shifted];
sum[0] += cach[1] * ref[y * size.x + x + 1];
sum[1] += cach[2] * ref[y * size.x + x + 1];
sum[2] += cach[0] * ref[y * size.x + x + 1];
++x_shifted;
cach[1] = pics[y_shifted * size.x + x_shifted];
sum[0] += cach[2] * ref[y * size.x + x + 2];
sum[1] += cach[0] * ref[y * size.x + x + 2];
sum[2] += cach[1] * ref[y * size.x + x + 2];
}
}
res[slice_pos.y * res_size.x + slice_pos.x] = sum[0];
res[slice_pos.y * res_size.x + slice_pos.x + 1] = sum[1];
res[slice_pos.y * res_size.x + slice_pos.x + 2] = sum[2];
}
template<typename T, typename RES>
__device__ __inline__ void compute_dyn(const T* __restrict__ pics, const T* __restrict__ ref, RES* __restrict__ res, size2_t size, size2_t res_size, size2_t slice_pos, vec2<int> shift, int k)
{
switch (k)
{
case 1:
compute<1>(pics, ref, res, size, res_size, slice_pos, shift);
break;
case 2:
compute<2>(pics, ref, res, size, res_size, slice_pos, shift);
break;
case 3:
compute<3>(pics, ref, res, size, res_size, slice_pos, shift);
break;
case 4:
compute<4>(pics, ref, res, size, res_size, slice_pos, shift);
break;
case 5:
compute<5>(pics, ref, res, size, res_size, slice_pos, shift);
break;
case 6:
compute<6>(pics, ref, res, size, res_size, slice_pos, shift);
break;
default:
printf("%d", k);
}
}
template<int k, typename T, typename RES>
__global__ void cross_corr_r(
const T* __restrict__ pics,
const T* __restrict__ ref,
RES* __restrict__ res,
size2_t size,
size2_t res_size,
esize_t ref_slices,
esize_t batch_size)
{
esize_t whole_x = blockIdx.x * blockDim.x + threadIdx.x;
esize_t cuda_y = blockIdx.y * blockDim.y + threadIdx.y;
//number of picture that this thread computes
esize_t slice_num = whole_x / res_size.x;
if (slice_num >= ref_slices || cuda_y >= res_size.y)
return;
size2_t slice_pos = { (whole_x % div_up(res_size.x, k))*k, cuda_y };
esize_t ref_num = slice_num % ref_slices;
size2_t r = (res_size - 1) / 2;
vec2<int> shift = { (int)slice_pos.x - (int)r.x, (int)slice_pos.y - (int)r.y };
ref += ref_num * size.area();
pics += slice_num * size.area();
res += slice_num * res_size.area();
for (esize_t i = 0; i < batch_size; ++i)
{
//printf("[%d %d] %d %d\n", (int)slice_pos.x, (int)slice_pos.y, k, (int)res_size.x);
if ((int)slice_pos.x + k > (int)res_size.x)
compute_dyn(pics, ref, res, size, res_size, slice_pos, shift, (int)res_size.x - (int)slice_pos.x);
else
compute<k, T, RES>(pics, ref, res, size, res_size, slice_pos, shift);
pics += ref_slices * size.area();
res += ref_slices * res_size.area();
}
}
template<typename T, typename RES>
void run_cross_corr_r(const T* pic_a, const T* pic_b, RES* res, size2_t size, size2_t res_size, esize_t ref_slices, esize_t batch_size)
{
constexpr int k = 2;
dim3 block_size(16, 16);
dim3 grid_size(div_up(div_up(res_size.x, k) * ref_slices, block_size.x), div_up(res_size.y, block_size.y));
hipLaunchKernelGGL(( cross_corr_r<k, T, RES>) , dim3(grid_size), dim3(block_size) , 0, 0, pic_a, pic_b, res, size, res_size, ref_slices, batch_size);
}
template void run_cross_corr_r<double, double>(
const double*,
const double*,
double* res,
size2_t size,
size2_t res_size,
esize_t,
esize_t);
template void run_cross_corr_r<float, float>(
const float*,
const float*,
float* res,
size2_t size,
size2_t res_size,
esize_t,
esize_t);
//*******************************************************************************************************************************************************
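	// copies a dest_size tile whose top-left corner is at region_pos out of src,
	// zero-filling any destination element whose source position falls outside src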
template<typename T>
__device__ __inline__ void copy_subregion(const T * __restrict__ src, size2_t src_size, T* __restrict__ dest, size2_t dest_size, size2_t region_pos)
{
for (esize_t y = threadIdx.y; y < dest_size.y; y += blockDim.y)
for (esize_t x = threadIdx.x; x < dest_size.x; x += blockDim.x)
{
dest[y * dest_size.x + x] = x + region_pos.x < src_size.x && y + region_pos.y < src_size.y
? src[(y + region_pos.y) * src_size.x + (x + region_pos.x)]
: 0;
}
}
//*******************************************************************************************************************************************************
constexpr int stripe_size = 8;
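	// cross_corr_opt: one thread block per (slice, output row). Warps stride over the
	// x-shifts of that row; within a warp, the 32 lanes split a stripe_size-row stripe
	// (stripe_size rows x warpSize/stripe_size column strides), accumulate partial
	// products, reduce with __shfl_down_sync, and lane 0 adds the warp's sum into the
	// shared per-row buffer res_line.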
template<typename T, typename RES>
__global__ void cross_corr_opt(
const T* __restrict__ pics,
const T* __restrict__ ref,
RES* __restrict__ res,
int2_t size,
int2_t res_size,
esize_t ref_slices,
esize_t batch_size)
{
esize_t slice_num = blockIdx.x / res_size.y;
esize_t res_y = blockIdx.x % res_size.y;
ref += slice_num * size.area();
pics += slice_num * size.area();
res += slice_num * res_size.area();
T* smem = shared_memory_proxy<T>();
T* res_line = smem;
for (int i = threadIdx.x; i < res_size.x; i += blockDim.x)
{
res_line[i] = 0;
}
__syncthreads();
int2_t res_r = (res_size - 1) / 2;
int y_shift = res_y - (int)res_r.y;
int y_begin = y_shift >= 0 ? 0 : -y_shift;
int warp_idx = threadIdx.x / warpSize;
int lane_idx = threadIdx.x % warpSize;
int team_size = warpSize / stripe_size;
int team_idx = lane_idx / team_size;
int team_lane = lane_idx % team_size;
//T sums[30];
for (int s = 0; s < size.y - abs(y_shift); s += stripe_size)
{
for (int x_shift = -res_r.x + warp_idx; x_shift <= res_r.x; x_shift += blockDim.x / warpSize)
{
T sum = 0;
int x_end = x_shift < 0 ? size.x : size.x - x_shift;
int x_begin = x_shift < 0 ? -x_shift : 0;
int y = s + y_begin + team_idx;
int y_shifted = y + y_shift;
if(y < size.y && y_shifted < size.y)
for (int x = x_begin + team_lane; x < x_end; x += team_size)
{
int x_shifted = x + x_shift;
//if(blockIdx.x < 1)
//printf("%d %d %d %d %d [%d %d] [%d %d] [%d %d] %d\n", blockIdx.x, s, warp_idx, lane_idx, y_begin, x_shift, y_shift, x, y, x_shifted, y_shifted, x_end);
sum += pics[y_shifted * size.x + x_shifted] * ref[y * size.x + x];
}
//printf("%d %d %d %d %d [%d %d] %f %d\n", blockIdx.x, s, warp_idx, lane_idx, y_begin, x_shift, y_shift, sum, x_shift + r.x);
for (int offset = warpSize / 2; offset > 0; offset /= 2)
sum += __shfl_down_sync(0xFFFFFFFF, sum, offset);
if(lane_idx == 0)
*(res_line + x_shift + res_r.x) += sum;
}
}
__syncthreads();
for (int i = threadIdx.x; i < res_size.x; i += blockDim.x)
{
res[res_size.x * res_y + i] = res_line[i];
}
}
template<typename T, typename RES>
void run_cross_corr_opt(
const T* pics,
const T* ref,
RES* res,
size2_t size,
size2_t res_size,
esize_t ref_slices,
esize_t batch_size)
{
esize_t block_dim = 256;
esize_t grid_size = res_size.y * ref_slices;
esize_t shared_mem_size = res_size.x * sizeof(T) * 2;
hipLaunchKernelGGL(( cross_corr_opt<T, RES>) , dim3(grid_size), dim3(block_dim), shared_mem_size , 0, pics, ref, res, { (int)size.x, (int)size.y }, { (int)res_size.x, (int)res_size.y }, ref_slices, batch_size);
}
template void run_cross_corr_opt<double, double>(
const double*,
const double*,
double* res,
size2_t size,
size2_t res_size,
esize_t,
esize_t);
template void run_cross_corr_opt<float, float>(
const float*,
const float*,
float* res,
size2_t size,
size2_t res_size,
esize_t,
esize_t);
//*******************************************************************************************************************************************************
constexpr int stripe_esize_tr = 32;
template<typename T, typename RES>
__global__ void cross_corr_opt_tr(
const T* __restrict__ pics,
const T* __restrict__ ref,
RES* __restrict__ res,
int2_t size,
int2_t res_size,
esize_t ref_slices,
esize_t batch_size)
{
esize_t slice_num = blockIdx.x / res_size.x;
esize_t res_x = blockIdx.x % res_size.x;
ref += slice_num * size.area();
pics += slice_num * size.area();
res += slice_num * res_size.area();
T* smem = shared_memory_proxy<T>();
T* res_line = smem;
for (int i = threadIdx.x; i < res_size.y; i += blockDim.x)
{
res_line[i] = 0;
}
__syncthreads();
int2_t res_r = (res_size - 1) / 2;
int x_shift = res_x - (int)res_r.x;
int x_begin = x_shift >= 0 ? 0 : -x_shift;
int warp_idx = threadIdx.x / warpSize;
int lane_idx = threadIdx.x % warpSize;
int team_size = warpSize / stripe_esize_tr;
int team_idx = lane_idx / team_size;
//int team_lane = lane_idx % team_size;
constexpr int k = 1;
//T sums[30];
for (int s = 0; s < size.x - abs(x_shift); s += stripe_esize_tr)
{
for (int y_shift = -res_r.y + warp_idx*k; y_shift <= res_r.y; y_shift += blockDim.x / warpSize * k)
{
T sum[k];
//T cach[k];
#pragma unroll
for (int i = 0; i < k; ++i)
sum[i] = 0;
int y_end = y_shift < 0 ? size.y : size.y - y_shift;
int y_begin = y_shift < 0 ? -y_shift : 0;
int x = s + x_begin + team_idx;
int x_shifted = x + x_shift;
if (x < size.x && x_shifted < size.x)
{
for (int y = y_begin; y < y_end; y += 1)
{
int y_shifted = y + y_shift;
//if(blockIdx.x < 1)
//printf("%d %d %d %d %d [%d %d] [%d %d] [%d %d] %d\n", blockIdx.x, s, warp_idx, lane_idx, y_begin, x_shift, y_shift, x, y, x_shifted, y_shifted, x_end);
#pragma unroll
for (int i = 0; i < k; ++i)
sum[i] += pics[y_shifted * size.x + x_shifted + i] * ref[y * size.x + x];
/*++y_shifted;
cach[2] = pics[y_shifted * size.x + x_shifted];
//sum[0] += pics[y_shifted * size.x + x_shifted] * ref[y * size.x + x];
//sum[1] += pics[y_shifted * size.x + x_shifted + 1] * ref[y * size.x + x];
sum[0] += cach[0] * ref[y * size.x + x];
sum[1] += cach[1] * ref[y * size.x + x];
sum[2] += cach[2] * ref[y * size.x + x];
++y_shifted;
cach[0] = pics[y_shifted * size.x + x_shifted];
sum[0] += cach[1] * ref[y * size.x + x + 1];
sum[1] += cach[2] * ref[y * size.x + x + 1];
sum[2] += cach[0] * ref[y * size.x + x + 1];
++y_shifted;
cach[1] = pics[y_shifted * size.x + x_shifted];
sum[0] += cach[2] * ref[y * size.x + x + 2];
sum[1] += cach[0] * ref[y * size.x + x + 2];
sum[2] += cach[1] * ref[y * size.x + x + 2];*/
}
}
//printf("%d %d %d %d %d [%d %d] %f %d\n", blockIdx.x, s, warp_idx, lane_idx, y_begin, x_shift, y_shift, sum, x_shift + r.x);
for (int offset = warpSize / 2; offset > 0; offset /= 2)
sum[0] += __shfl_down_sync(0xFFFFFFFF, sum[0], offset);
if (lane_idx == 0)
{
#pragma unroll
for (int i = 0; i < k; ++i)
*(res_line + y_shift + res_r.x + i) += sum[i];
}
}
}
__syncthreads();
for (int i = threadIdx.x; i < res_size.y; i += blockDim.x)
{
res[res_size.x * i + res_x] = res_line[i];
}
}
template<typename T, typename RES>
void run_cross_corr_opt_tr(
const T* pics,
const T* ref,
RES* res,
size2_t size,
size2_t res_size,
esize_t ref_slices,
esize_t batch_size)
{
esize_t block_dim = 256;
esize_t grid_size = res_size.y * ref_slices;
esize_t shared_mem_size = res_size.x * sizeof(T) * 2;
		hipLaunchKernelGGL(( cross_corr_opt_tr<T, RES>) , dim3(grid_size), dim3(block_dim), shared_mem_size , 0, pics, ref, res, { (int)size.x, (int)size.y }, { (int)res_size.x, (int)res_size.y }, ref_slices, batch_size);
}
template void run_cross_corr_opt_tr<double, double>(
const double*,
const double*,
double* res,
size2_t size,
size2_t res_size,
esize_t,
esize_t);
template void run_cross_corr_opt_tr<float, float>(
const float*,
const float*,
float* res,
size2_t size,
size2_t res_size,
esize_t,
esize_t);
} | 36b18d8eb7c0ec3c045e57a9acfaed72649c87d6.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "kernels.cuh"
#include "device_launch_parameters.h"
#include "device_helpers.hpp"
namespace emida
{
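	// Direct 2D cross-correlation: for every shift s in the result window,
	// res[s] = sum over the overlapping region of pics[p + s - r] * ref[p], where
	// r = (res_size - 1) / 2 is the window radius. One thread produces one output element
	// (per batch iteration); each slice is one picture/reference pair, and every batch
	// iteration advances to the next group of ref_slices pictures while reusing the same references.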
template<typename T, typename RES>
__global__ void cross_corr(
const T* __restrict__ pics,
const T* __restrict__ ref,
RES* __restrict__ res,
size2_t size,
size2_t res_size,
esize_t ref_slices,
esize_t batch_size)
{
esize_t whole_x = blockIdx.x * blockDim.x + threadIdx.x;
esize_t cuda_y = blockIdx.y * blockDim.y + threadIdx.y;
//number of picture that this thread computes
esize_t slice_num = whole_x / res_size.x;
if (slice_num >= ref_slices || cuda_y >= res_size.y)
return;
size2_t slice_pos = { whole_x % res_size.x, cuda_y };
esize_t ref_num = slice_num % ref_slices;
size2_t r = (res_size - 1) / 2;
vec2<int> shift = { (int)slice_pos.x - (int)r.x, (int)slice_pos.y - (int)r.y };
ref += ref_num * size.area();
pics += slice_num * size.area();
res += slice_num * res_size.area();
for (esize_t i = 0; i < batch_size; ++i)
{
esize_t x_end = min(size.x - shift.x, size.x);
esize_t y_end = min(size.y - shift.y, size.y);
RES sum = 0;
for (esize_t y = max(-shift.y, 0); y < y_end; ++y)
{
for (esize_t x = max(-shift.x, 0); x < x_end; ++x)
{
int x_shifted = x + shift.x;
int y_shifted = y + shift.y;
sum += pics[y_shifted * size.x + x_shifted] * ref[y * size.x + x];
}
}
res[slice_pos.pos(res_size.x)] = sum;
pics += ref_slices * size.area();
res += ref_slices * res_size.area();
}
}
template<typename T, typename RES>
void run_cross_corr(const T* pic_a, const T* pic_b, RES* res, size2_t size, size2_t res_size, esize_t ref_slices, esize_t batch_size)
{
dim3 block_size(16, 16);
dim3 grid_size(div_up(res_size.x * ref_slices, block_size.x), div_up(res_size.y, block_size.y));
cross_corr<T, RES> <<<grid_size, block_size>>> (pic_a, pic_b, res, size, res_size, ref_slices, batch_size);
}
template void run_cross_corr<int, int>(
const int*,
const int*,
int* res,
size2_t size,
size2_t res_size,
esize_t,
esize_t);
template void run_cross_corr<double, double>(
const double*,
const double*,
double* res,
size2_t size,
size2_t res_size,
esize_t,
esize_t);
template void run_cross_corr<float, float>(
const float*,
const float*,
float* res,
size2_t size,
size2_t res_size,
esize_t,
esize_t);
//*******************************************************************************************************************************************************
// Following are experiments with optimization of cross correlation, currently unused
template<int k, typename T, typename RES>
__device__ __inline__ void compute(const T* __restrict__ pics, const T* __restrict__ ref, RES* __restrict__ res, size2_t size, size2_t res_size, size2_t slice_pos, vec2<int> shift)
{
esize_t x_end = shift.x < 0 ? size.x : size.x - shift.x;
esize_t y_end = shift.y < 0 ? size.y : size.y - shift.y;
//control flow divergency in following fors??
RES sum[k];
#pragma unroll
for (esize_t i = 0; i < k; ++i)
sum[i] = 0;
for (esize_t y = shift.y >= 0 ? 0 : -shift.y; y < y_end; ++y)
{
for (esize_t x = shift.x >= 0 ? 0 : -shift.x; x < x_end; ++x)
{
int x_shifted = x + shift.x;
int y_shifted = y + shift.y;
for (esize_t i = 0; i < k; ++i)
sum[i] += pics[y_shifted * size.x + x_shifted + i] * ref[y * size.x + x];
}
}
#pragma unroll
for (esize_t i = 0; i < k; ++i)
res[slice_pos.y * res_size.x + slice_pos.x + i] = sum[i];
}
template<>
__device__ __inline__ void compute<2, double, double>(const double* __restrict__ pics, const double* __restrict__ ref, double* __restrict__ res, size2_t size, size2_t res_size, size2_t slice_pos, vec2<int> shift)
{
esize_t x_start = shift.x < 0 ? -shift.x : 0;
esize_t x_end = shift.x < 0 ? size.x : size.x - shift.x;
esize_t y_end = shift.y < 0 ? size.y : size.y - shift.y;
//control flow divergency in following fors??
double sum[2];
sum[0] = 0;
sum[1] = 0;
for (esize_t y = shift.y >= 0 ? 0 : -shift.y; y < y_end; ++y)
{
int y_shifted = y + shift.y;
double cach[2];
cach[0] = pics[y_shifted * size.x + x_start + shift.x];
int x_shifted = x_start + shift.x;
for (esize_t x = x_start; x < x_end; x+=2)
{
++x_shifted;
cach[1] = pics[y_shifted * size.x + x_shifted];
sum[0] += cach[0] * ref[y * size.x + x];
sum[1] += cach[1] * ref[y * size.x + x];
++x_shifted;
cach[0] = pics[y_shifted * size.x + x_shifted];
sum[0] += cach[1] * ref[y * size.x + x + 1];
sum[1] += cach[0] * ref[y * size.x + x + 1];
}
}
res[slice_pos.y * res_size.x + slice_pos.x] = sum[0];
res[slice_pos.y * res_size.x + slice_pos.x + 1] = sum[1];
}
template<>
__device__ __inline__ void compute<3, double, double>(const double* __restrict__ pics, const double* __restrict__ ref, double* __restrict__ res, size2_t size, size2_t res_size, size2_t slice_pos, vec2<int> shift)
{
esize_t x_end = shift.x < 0 ? size.x : size.x - shift.x;
esize_t y_end = shift.y < 0 ? size.y : size.y - shift.y;
//control flow divergency in following fors??
double sum[3];
sum[0] = 0;
sum[1] = 0;
		sum[2] = 0;
esize_t x_start = shift.x >= 0 ? 0 : -shift.x;
for (esize_t y = shift.y >= 0 ? 0 : -shift.y; y < y_end; ++y)
{
int y_shifted = y + shift.y;
double cach[3];
cach[0] = pics[y_shifted * size.x + x_start + shift.x];
cach[1] = pics[y_shifted * size.x + x_start + shift.x + 1];
int x_shifted = x_start + shift.x;
for (esize_t x = x_start; x < x_end; x += 3)
{
++x_shifted;
cach[2] = pics[y_shifted * size.x + x_shifted];
//sum[0] += pics[y_shifted * size.x + x_shifted] * ref[y * size.x + x];
//sum[1] += pics[y_shifted * size.x + x_shifted + 1] * ref[y * size.x + x];
sum[0] += cach[0] * ref[y * size.x + x];
sum[1] += cach[1] * ref[y * size.x + x];
sum[2] += cach[2] * ref[y * size.x + x];
++x_shifted;
cach[0] = pics[y_shifted * size.x + x_shifted];
sum[0] += cach[1] * ref[y * size.x + x + 1];
sum[1] += cach[2] * ref[y * size.x + x + 1];
sum[2] += cach[0] * ref[y * size.x + x + 1];
++x_shifted;
cach[1] = pics[y_shifted * size.x + x_shifted];
sum[0] += cach[2] * ref[y * size.x + x + 2];
sum[1] += cach[0] * ref[y * size.x + x + 2];
sum[2] += cach[1] * ref[y * size.x + x + 2];
}
}
res[slice_pos.y * res_size.x + slice_pos.x] = sum[0];
res[slice_pos.y * res_size.x + slice_pos.x + 1] = sum[1];
res[slice_pos.y * res_size.x + slice_pos.x + 2] = sum[2];
}
template<typename T, typename RES>
__device__ __inline__ void compute_dyn(const T* __restrict__ pics, const T* __restrict__ ref, RES* __restrict__ res, size2_t size, size2_t res_size, size2_t slice_pos, vec2<int> shift, int k)
{
switch (k)
{
case 1:
compute<1>(pics, ref, res, size, res_size, slice_pos, shift);
break;
case 2:
compute<2>(pics, ref, res, size, res_size, slice_pos, shift);
break;
case 3:
compute<3>(pics, ref, res, size, res_size, slice_pos, shift);
break;
case 4:
compute<4>(pics, ref, res, size, res_size, slice_pos, shift);
break;
case 5:
compute<5>(pics, ref, res, size, res_size, slice_pos, shift);
break;
case 6:
compute<6>(pics, ref, res, size, res_size, slice_pos, shift);
break;
default:
printf("%d", k);
}
}
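// cross_corr_r: one thread produces k consecutive output columns of one result row; when fewer than k
// columns remain at the right edge, compute_dyn dispatches to the matching fixed-k implementation.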
template<int k, typename T, typename RES>
__global__ void cross_corr_r(
const T* __restrict__ pics,
const T* __restrict__ ref,
RES* __restrict__ res,
size2_t size,
size2_t res_size,
esize_t ref_slices,
esize_t batch_size)
{
esize_t whole_x = blockIdx.x * blockDim.x + threadIdx.x;
esize_t cuda_y = blockIdx.y * blockDim.y + threadIdx.y;
//number of picture that this thread computes
esize_t slice_num = whole_x / div_up(res_size.x, k);
if (slice_num >= ref_slices || cuda_y >= res_size.y)
return;
size2_t slice_pos = { (whole_x % div_up(res_size.x, k))*k, cuda_y };
esize_t ref_num = slice_num % ref_slices;
size2_t r = (res_size - 1) / 2;
vec2<int> shift = { (int)slice_pos.x - (int)r.x, (int)slice_pos.y - (int)r.y };
ref += ref_num * size.area();
pics += slice_num * size.area();
res += slice_num * res_size.area();
for (esize_t i = 0; i < batch_size; ++i)
{
//printf("[%d %d] %d %d\n", (int)slice_pos.x, (int)slice_pos.y, k, (int)res_size.x);
if ((int)slice_pos.x + k > (int)res_size.x)
compute_dyn(pics, ref, res, size, res_size, slice_pos, shift, (int)res_size.x - (int)slice_pos.x);
else
compute<k, T, RES>(pics, ref, res, size, res_size, slice_pos, shift);
pics += ref_slices * size.area();
res += ref_slices * res_size.area();
}
}
template<typename T, typename RES>
void run_cross_corr_r(const T* pic_a, const T* pic_b, RES* res, size2_t size, size2_t res_size, esize_t ref_slices, esize_t batch_size)
{
constexpr int k = 2;
dim3 block_size(16, 16);
dim3 grid_size(div_up(div_up(res_size.x, k) * ref_slices, block_size.x), div_up(res_size.y, block_size.y));
cross_corr_r<k, T, RES> <<<grid_size, block_size >>> (pic_a, pic_b, res, size, res_size, ref_slices, batch_size);
}
template void run_cross_corr_r<double, double>(
const double*,
const double*,
double* res,
size2_t size,
size2_t res_size,
esize_t,
esize_t);
template void run_cross_corr_r<float, float>(
const float*,
const float*,
float* res,
size2_t size,
size2_t res_size,
esize_t,
esize_t);
//*******************************************************************************************************************************************************
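// Cooperatively copies a rectangular sub-region of src into dest using all threads of the block,
// zero-padding elements whose source coordinates fall outside src.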
template<typename T>
__device__ __inline__ void copy_subregion(const T * __restrict__ src, size2_t src_size, T* __restrict__ dest, size2_t dest_size, size2_t region_pos)
{
for (esize_t y = threadIdx.y; y < dest_size.y; y += blockDim.y)
for (esize_t x = threadIdx.x; x < dest_size.x; x += blockDim.x)
{
dest[y * dest_size.x + x] = x + region_pos.x < src_size.x && y + region_pos.y < src_size.y
? src[(y + region_pos.y) * src_size.x + (x + region_pos.x)]
: 0;
}
}
//*******************************************************************************************************************************************************
constexpr int stripe_size = 8;
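// cross_corr_opt: one block per output row of each slice. Each warp takes one x-shift at a time; its lanes
// are split into stripe_size teams (one team per row of a horizontal stripe of the image), each team strides
// across its row, the per-lane partial products are combined with a warp shuffle reduction and lane 0
// accumulates the result into the shared res_line buffer.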
template<typename T, typename RES>
__global__ void cross_corr_opt(
const T* __restrict__ pics,
const T* __restrict__ ref,
RES* __restrict__ res,
int2_t size,
int2_t res_size,
esize_t ref_slices,
esize_t batch_size)
{
esize_t slice_num = blockIdx.x / res_size.y;
esize_t res_y = blockIdx.x % res_size.y;
ref += slice_num * size.area();
pics += slice_num * size.area();
res += slice_num * res_size.area();
T* smem = shared_memory_proxy<T>();
T* res_line = smem;
for (int i = threadIdx.x; i < res_size.x; i += blockDim.x)
{
res_line[i] = 0;
}
__syncthreads();
int2_t res_r = (res_size - 1) / 2;
int y_shift = res_y - (int)res_r.y;
int y_begin = y_shift >= 0 ? 0 : -y_shift;
int warp_idx = threadIdx.x / warpSize;
int lane_idx = threadIdx.x % warpSize;
int team_size = warpSize / stripe_size;
int team_idx = lane_idx / team_size;
int team_lane = lane_idx % team_size;
//T sums[30];
for (int s = 0; s < size.y - abs(y_shift); s += stripe_size)
{
for (int x_shift = -res_r.x + warp_idx; x_shift <= res_r.x; x_shift += blockDim.x / warpSize)
{
T sum = 0;
int x_end = x_shift < 0 ? size.x : size.x - x_shift;
int x_begin = x_shift < 0 ? -x_shift : 0;
int y = s + y_begin + team_idx;
int y_shifted = y + y_shift;
if(y < size.y && y_shifted < size.y)
for (int x = x_begin + team_lane; x < x_end; x += team_size)
{
int x_shifted = x + x_shift;
//if(blockIdx.x < 1)
//printf("%d %d %d %d %d [%d %d] [%d %d] [%d %d] %d\n", blockIdx.x, s, warp_idx, lane_idx, y_begin, x_shift, y_shift, x, y, x_shifted, y_shifted, x_end);
sum += pics[y_shifted * size.x + x_shifted] * ref[y * size.x + x];
}
//printf("%d %d %d %d %d [%d %d] %f %d\n", blockIdx.x, s, warp_idx, lane_idx, y_begin, x_shift, y_shift, sum, x_shift + r.x);
for (int offset = warpSize / 2; offset > 0; offset /= 2)
sum += __shfl_down_sync(0xFFFFFFFF, sum, offset);
if(lane_idx == 0)
*(res_line + x_shift + res_r.x) += sum;
}
}
__syncthreads();
for (int i = threadIdx.x; i < res_size.x; i += blockDim.x)
{
res[res_size.x * res_y + i] = res_line[i];
}
}
template<typename T, typename RES>
void run_cross_corr_opt(
const T* pics,
const T* ref,
RES* res,
size2_t size,
size2_t res_size,
esize_t ref_slices,
esize_t batch_size)
{
esize_t block_dim = 256;
esize_t grid_size = res_size.y * ref_slices;
esize_t shared_mem_size = res_size.x * sizeof(T) * 2;
cross_corr_opt<T, RES> <<<grid_size, block_dim, shared_mem_size >>> (pics, ref, res, { (int)size.x, (int)size.y }, { (int)res_size.x, (int)res_size.y }, ref_slices, batch_size);
}
template void run_cross_corr_opt<double, double>(
const double*,
const double*,
double* res,
size2_t size,
size2_t res_size,
esize_t,
esize_t);
template void run_cross_corr_opt<float, float>(
const float*,
const float*,
float* res,
size2_t size,
size2_t res_size,
esize_t,
esize_t);
//*******************************************************************************************************************************************************
constexpr int stripe_esize_tr = 32;
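// cross_corr_opt_tr: transposed variant of cross_corr_opt - one block per output column, iterating over
// vertical stripes and y-shifts; k is a compile-time blocking factor over y-shifts (currently 1).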
template<typename T, typename RES>
__global__ void cross_corr_opt_tr(
const T* __restrict__ pics,
const T* __restrict__ ref,
RES* __restrict__ res,
int2_t size,
int2_t res_size,
esize_t ref_slices,
esize_t batch_size)
{
esize_t slice_num = blockIdx.x / res_size.x;
esize_t res_x = blockIdx.x % res_size.x;
ref += slice_num * size.area();
pics += slice_num * size.area();
res += slice_num * res_size.area();
T* smem = shared_memory_proxy<T>();
T* res_line = smem;
for (int i = threadIdx.x; i < res_size.y; i += blockDim.x)
{
res_line[i] = 0;
}
__syncthreads();
int2_t res_r = (res_size - 1) / 2;
int x_shift = res_x - (int)res_r.x;
int x_begin = x_shift >= 0 ? 0 : -x_shift;
int warp_idx = threadIdx.x / warpSize;
int lane_idx = threadIdx.x % warpSize;
int team_size = warpSize / stripe_esize_tr;
int team_idx = lane_idx / team_size;
//int team_lane = lane_idx % team_size;
constexpr int k = 1;
//T sums[30];
for (int s = 0; s < size.x - abs(x_shift); s += stripe_esize_tr)
{
for (int y_shift = -res_r.y + warp_idx*k; y_shift <= res_r.y; y_shift += blockDim.x / warpSize * k)
{
T sum[k];
//T cach[k];
#pragma unroll
for (int i = 0; i < k; ++i)
sum[i] = 0;
int y_end = y_shift < 0 ? size.y : size.y - y_shift;
int y_begin = y_shift < 0 ? -y_shift : 0;
int x = s + x_begin + team_idx;
int x_shifted = x + x_shift;
if (x < size.x && x_shifted < size.x)
{
for (int y = y_begin; y < y_end; y += 1)
{
int y_shifted = y + y_shift;
//if(blockIdx.x < 1)
//printf("%d %d %d %d %d [%d %d] [%d %d] [%d %d] %d\n", blockIdx.x, s, warp_idx, lane_idx, y_begin, x_shift, y_shift, x, y, x_shifted, y_shifted, x_end);
#pragma unroll
for (int i = 0; i < k; ++i)
sum[i] += pics[y_shifted * size.x + x_shifted + i] * ref[y * size.x + x];
/*++y_shifted;
cach[2] = pics[y_shifted * size.x + x_shifted];
//sum[0] += pics[y_shifted * size.x + x_shifted] * ref[y * size.x + x];
//sum[1] += pics[y_shifted * size.x + x_shifted + 1] * ref[y * size.x + x];
sum[0] += cach[0] * ref[y * size.x + x];
sum[1] += cach[1] * ref[y * size.x + x];
sum[2] += cach[2] * ref[y * size.x + x];
++y_shifted;
cach[0] = pics[y_shifted * size.x + x_shifted];
sum[0] += cach[1] * ref[y * size.x + x + 1];
sum[1] += cach[2] * ref[y * size.x + x + 1];
sum[2] += cach[0] * ref[y * size.x + x + 1];
++y_shifted;
cach[1] = pics[y_shifted * size.x + x_shifted];
sum[0] += cach[2] * ref[y * size.x + x + 2];
sum[1] += cach[0] * ref[y * size.x + x + 2];
sum[2] += cach[1] * ref[y * size.x + x + 2];*/
}
}
//printf("%d %d %d %d %d [%d %d] %f %d\n", blockIdx.x, s, warp_idx, lane_idx, y_begin, x_shift, y_shift, sum, x_shift + r.x);
for (int offset = warpSize / 2; offset > 0; offset /= 2)
sum[0] += __shfl_down_sync(0xFFFFFFFF, sum[0], offset);
if (lane_idx == 0)
{
#pragma unroll
for (int i = 0; i < k; ++i)
*(res_line + y_shift + res_r.y + i) += sum[i];
}
}
}
__syncthreads();
for (int i = threadIdx.x; i < res_size.y; i += blockDim.x)
{
res[res_size.x * i + res_x] = res_line[i];
}
}
template<typename T, typename RES>
void run_cross_corr_opt_tr(
const T* pics,
const T* ref,
RES* res,
size2_t size,
size2_t res_size,
esize_t ref_slices,
esize_t batch_size)
{
esize_t block_dim = 256;
esize_t grid_size = res_size.x * ref_slices;
esize_t shared_mem_size = res_size.y * sizeof(T) * 2;
cross_corr_opt_tr<T, RES> << <grid_size, block_dim, shared_mem_size >> > (pics, ref, res, { (int)size.x, (int)size.y }, { (int)res_size.x, (int)res_size.y }, ref_slices, batch_size);
}
template void run_cross_corr_opt_tr<double, double>(
const double*,
const double*,
double* res,
size2_t size,
size2_t res_size,
esize_t,
esize_t);
template void run_cross_corr_opt_tr<float, float>(
const float*,
const float*,
float* res,
size2_t size,
size2_t res_size,
esize_t,
esize_t);
} |
62aeafa862975bcac22ed3ef4e63505c344b3d00.hip | // !!! This is a file automatically generated by hipify!!!
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
// time stamp function in seconds
double getTimeStamp() {
struct timeval tv ;
gettimeofday( &tv, NULL ) ;
return (double) tv.tv_usec/1000000 + tv.tv_sec ;
}
// host side matrix addition
void h_addmat(float *A, float *B, float *C, int nx, int ny){
for (int i =0;i<nx;i++){
for(int j=0;j<ny;j++){
C[i*ny+j] = A[i*ny+j]+B[i*ny+j];
}
}
return;
}
// device-side matrix addition
__global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){
int ix = threadIdx.x + blockIdx.x*(blockDim.x) ;
int iy = threadIdx.y + blockIdx.y*(blockDim.y) ;
int idx = ix*ny + iy ;
if( (ix<nx) && (iy<ny) ){
C[idx] = A[idx] + B[idx] ;
//printf("Thread %d %d\n",ix,iy);
}
}
void initData(float *M, int x, int y, int width, int flag ){
if(flag)
{
//printf("A\n");
for (int i=0;i<x;i++){
for (int j=0;j<y;j++){
M[i*width+j] = (float)(i+j)/3.0;
}
}
}
else
{
//printf("B\n");
for (int i=0;i<x;i++){
for (int j=0;j<y;j++){
M[i*width+j] = (float)3.14*(i+j) ;
}
}
}
}
int main( int argc, char *argv[] ) {
if (argc!=3){
printf("Error: Invalid number of arguments.\n");
exit(1);
}
int nx = atoi( argv[1] ) ; // should check validity
int ny = atoi( argv[2] ) ; // should check validity
if(nx <=0 || ny <=0){
printf("Error: Dimension lessThanOrEqualto Zero.\n");
exit(1);
}
int my=0;
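// pad the column count up to the next multiple of 16 so the 16x16 thread blocks tile the matrix exactly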
if((ny%16) != 0){
my = 16 - (ny%16);
}
int noElems = (nx)*(ny+my) ;
int bytes = noElems * sizeof(float) ;
//printf ("%d %d %d %d \n",(nx*ny),(noElems),mx,my);
//float *h_hC = (float *) malloc( bytes ) ; // host result
float *d_A, *d_B, *d_C ;
hipMalloc( (void **) &d_A, bytes ) ;
hipMalloc( (void **) &d_B, bytes ) ;
hipMalloc( (void **) &d_C, bytes ) ;
float *h_hC = (float *) malloc( bytes ) ; // host result
float *h_Ap, *h_Bp, *h_dCp;
hipHostMalloc( (float **) &h_Ap, bytes ) ;
hipHostMalloc( (float **) &h_Bp, bytes ) ;
hipHostMalloc( (float **) &h_dCp, bytes ) ;
hipMemset(h_Ap,0,bytes);
hipMemset(h_Bp,0,bytes);
hipMemset(h_dCp,0,bytes);
// init matrices with random data
initData(h_Ap,nx,ny,ny+my,1); initData(h_Bp,nx,ny,ny+my,0);
double timeStampA = getTimeStamp() ;
//transfer data to dev
hipMemcpy( d_A, h_Ap, bytes, hipMemcpyHostToDevice ) ;
hipMemcpy( d_B, h_Bp, bytes, hipMemcpyHostToDevice ) ;
double timeStampB = getTimeStamp() ;
// invoke Kernel
dim3 block( 16, 16) ; // you will want to configure this
dim3 grid( (nx+block.x-1)/block.x, (ny+my)/block.y) ;
//printf("Grid %d %d \n",(nx+block.x-1)/block.x,(ny+my)/block.y);
hipLaunchKernelGGL(( f_addmat), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nx, ny+my ) ;
hipDeviceSynchronize() ;
double timeStampC = getTimeStamp() ;
//copy data back
hipMemcpy(h_dCp, d_C, bytes, hipMemcpyDeviceToHost);
double timeStampD = getTimeStamp() ;
// free GPU resources
hipFree( d_A ) ; hipFree( d_B ) ; hipFree( d_C ) ;
// CPU Matrix add
h_addmat( h_Ap, h_Bp, h_hC, nx, ny+my ) ;
// Check results
int flag = 0;
for(int i=0;i<(nx);i++){
for(int j=0;j<(ny+my);j++){
if(h_hC[i*(ny+my)+j] != h_dCp[i*(ny+my)+j])
flag=1;
}
}
if (flag == 0){
printf("%.6f %.6f %.6f %.6f\n",(timeStampD-timeStampA),(timeStampB-timeStampA),(timeStampC-timeStampB),(timeStampD-timeStampC));
}
//free other resources
hipHostFree(h_Ap); hipHostFree(h_Bp); hipHostFree(h_dCp);
free(h_hC);
hipDeviceReset() ;
}
| 62aeafa862975bcac22ed3ef4e63505c344b3d00.cu | #include <sys/time.h>
#include <cuda.h>
#include <stdio.h>
// time stamp function in seconds
double getTimeStamp() {
struct timeval tv ;
gettimeofday( &tv, NULL ) ;
return (double) tv.tv_usec/1000000 + tv.tv_sec ;
}
// host side matrix addition
void h_addmat(float *A, float *B, float *C, int nx, int ny){
for (int i =0;i<nx;i++){
for(int j=0;j<ny;j++){
C[i*ny+j] = A[i*ny+j]+B[i*ny+j];
}
}
return;
}
// device-side matrix addition
__global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){
int ix = threadIdx.x + blockIdx.x*(blockDim.x) ;
int iy = threadIdx.y + blockIdx.y*(blockDim.y) ;
int idx = ix*ny + iy ;
if( (ix<nx) && (iy<ny) ){
C[idx] = A[idx] + B[idx] ;
//printf("Thread %d %d\n",ix,iy);
}
}
void initData(float *M, int x, int y, int width, int flag ){
if(flag)
{
//printf("A\n");
for (int i=0;i<x;i++){
for (int j=0;j<y;j++){
M[i*width+j] = (float)(i+j)/3.0;
}
}
}
else
{
//printf("B\n");
for (int i=0;i<x;i++){
for (int j=0;j<y;j++){
M[i*width+j] = (float)3.14*(i+j) ;
}
}
}
}
int main( int argc, char *argv[] ) {
if (argc!=3){
printf("Error: Invalid number of arguments.\n");
exit(1);
}
int nx = atoi( argv[1] ) ; // should check validity
int ny = atoi( argv[2] ) ; // should check validity
if(nx <=0 || ny <=0){
printf("Error: Dimension lessThanOrEqualto Zero.\n");
exit(1);
}
int my=0;
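// pad the column count up to the next multiple of 16 so the 16x16 thread blocks tile the matrix exactly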
if((ny%16) != 0){
my = 16 - (ny%16);
}
int noElems = (nx)*(ny+my) ;
int bytes = noElems * sizeof(float) ;
//printf ("%d %d %d %d \n",(nx*ny),(noElems),mx,my);
//float *h_hC = (float *) malloc( bytes ) ; // host result
float *d_A, *d_B, *d_C ;
cudaMalloc( (void **) &d_A, bytes ) ;
cudaMalloc( (void **) &d_B, bytes ) ;
cudaMalloc( (void **) &d_C, bytes ) ;
float *h_hC = (float *) malloc( bytes ) ; // host result
float *h_Ap, *h_Bp, *h_dCp;
cudaMallocHost( (float **) &h_Ap, bytes ) ;
cudaMallocHost( (float **) &h_Bp, bytes ) ;
cudaMallocHost( (float **) &h_dCp, bytes ) ;
cudaMemset(h_Ap,0,bytes);
cudaMemset(h_Bp,0,bytes);
cudaMemset(h_dCp,0,bytes);
// init matrices with random data
initData(h_Ap,nx,ny,ny+my,1); initData(h_Bp,nx,ny,ny+my,0);
double timeStampA = getTimeStamp() ;
//transfer data to dev
cudaMemcpy( d_A, h_Ap, bytes, cudaMemcpyHostToDevice ) ;
cudaMemcpy( d_B, h_Bp, bytes, cudaMemcpyHostToDevice ) ;
double timeStampB = getTimeStamp() ;
// invoke Kernel
dim3 block( 16, 16) ; // you will want to configure this
dim3 grid( (nx+block.x-1)/block.x, (ny+my)/block.y) ;
//printf("Grid %d %d \n",(nx+block.x-1)/block.x,(ny+my)/block.y);
f_addmat<<<grid, block>>>( d_A, d_B, d_C, nx, ny+my ) ;
cudaDeviceSynchronize() ;
double timeStampC = getTimeStamp() ;
//copy data back
cudaMemcpy(h_dCp, d_C, bytes, cudaMemcpyDeviceToHost);
double timeStampD = getTimeStamp() ;
// free GPU resources
cudaFree( d_A ) ; cudaFree( d_B ) ; cudaFree( d_C ) ;
// CPU Matrix add
h_addmat( h_Ap, h_Bp, h_hC, nx, ny+my ) ;
// Check results
int flag = 0;
for(int i=0;i<(nx);i++){
for(int j=0;j<(ny+my);j++){
if(h_hC[i*(ny+my)+j] != h_dCp[i*(ny+my)+j])
flag=1;
}
}
if (flag == 0){
printf("%.6f %.6f %.6f %.6f\n",(timeStampD-timeStampA),(timeStampB-timeStampA),(timeStampC-timeStampB),(timeStampD-timeStampC));
}
//free other resources
cudaFreeHost(h_Ap); cudaFreeHost(h_Bp); cudaFreeHost(h_dCp);
free(h_hC);
cudaDeviceReset() ;
}
|
678fab1d2e8bbc9fdd1c9e2f82e7879c5eea6578.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-------------------------------------------------------------------------
*
* CUDA functions for Steepest descent in POCS-type algorithms.
*
* This file will iteratively minimize by steepest descent the total variation
* of the input image, with the parameters given, using GPUs.
*
* CODE by Ander Biguri
*
---------------------------------------------------------------------------
---------------------------------------------------------------------------
Copyright (c) 2015, University of Bath and CERN- European Organization for
Nuclear Research
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
Contact: [email protected]
Codes : https://github.com/CERN/TIGRE
---------------------------------------------------------------------------
*/
#define MAXTHREADS 1024
#include "POCS_TV.hpp"
#include <stdio.h>
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
printf("%s \n",msg);\
printf("CBCT:CUDA:Atb",hipGetErrorString(__err));\
} \
} while (0)
// CUDA kernels
//https://stackoverflow.com/questions/21332040/simple-cuda-kernel-optimization/21340927#21340927
__global__ void divideArrayScalar(float* vec,float scalar,const size_t n)
{
unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x;
for(; i<n; i+=gridDim.x*blockDim.x) {
vec[i]/=scalar;
}
}
__global__ void multiplyArrayScalar(float* vec,float scalar,const size_t n)
{
unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x;
for(; i<n; i+=gridDim.x*blockDim.x) {
vec[i]*=scalar;
}
}
__global__ void substractArrays(float* vec,float* vec2,const size_t n)
{
unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x;
for(; i<n; i+=gridDim.x*blockDim.x) {
vec[i]-=vec2[i];
}
}
__device__ __inline__
void gradient(const float* u, float* grad,
long z, long y, long x,
long depth, long rows, long cols)
{
unsigned long size2d = rows*cols;
// neighbour queries at x+1, y+1 or z+1 may fall outside the volume; leave grad at zero and avoid an out-of-bounds read
if ( x >= cols || y >= rows || z >= depth ) return;
unsigned long long idx = z * size2d + y * cols + x;
float uidx = u[idx];
if ( z - 1 >= 0 && z<depth) {
grad[0] = (uidx-u[(z-1)*size2d + y*cols + x]) ;
}
if ( y - 1 >= 0 && y<rows){
grad[1] = (uidx-u[z*size2d + (y-1)*cols + x]) ;
}
if ( x - 1 >= 0 && x<cols) {
grad[2] = (uidx-u[z*size2d + y*cols + (x-1)]);
}
}
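// One thread per voxel: evaluates the gradient of the TV functional from the backward differences at
// (x,y,z) and at its three forward neighbours (x+1,y,z), (x,y+1,z) and (x,y,z+1).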
__global__ void gradientTV(const float* f, float* dftv,
long depth, long rows, long cols){
unsigned long x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned long y = threadIdx.y + blockIdx.y * blockDim.y;
unsigned long z = threadIdx.z + blockIdx.z * blockDim.z;
unsigned long long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float df[3] ={0,0,0};
float dfi[3]={0,0,0}; // dfi== \partial f_{i+1,j,k}
float dfj[3]={0,0,0};
float dfk[3]={0,0,0};
gradient(f,df ,z ,y ,x , depth,rows,cols);
gradient(f,dfi ,z ,y ,x+1, depth,rows,cols);
gradient(f,dfj ,z ,y+1,x , depth,rows,cols);
gradient(f,dfk ,z+1,y ,x , depth,rows,cols);
float eps=0.00000001; // avoid division by zero
dftv[idx]=(df[0]+df[1]+df[2])/(sqrt(df[0] *df[0] +df[1] *df[1] +df[2] *df[2])+eps)
-dfi[2]/(sqrt(dfi[0]*dfi[0]+dfi[1]*dfi[1]+dfi[2]*dfi[2]) +eps) // I wish I could precompute this, but if I do then I'd need to recompute the gradient.
-dfj[1]/(sqrt(dfj[0]*dfj[0]+dfj[1]*dfj[1]+dfj[2]*dfj[2]) +eps)
-dfk[0]/(sqrt(dfk[0]*dfk[0]+dfk[1]*dfk[1]+dfk[2]*dfk[2]) +eps);
}
__device__ void warpReduce(volatile float *sdata, size_t tid) {
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
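// Block-wide sum of squares: each thread accumulates a grid-stride partial sum of g_idata[i]^2, the block
// then reduces it in shared memory (the first reduction step assumes blockDim.x == MAXTHREADS) and thread 0
// writes one partial result per block to g_odata.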
__global__ void reduceNorm2(float *g_idata, float *g_odata, size_t n){
extern __shared__ volatile float sdata[];
//http://stackoverflow.com/a/35133396/1485872
size_t tid = threadIdx.x;
size_t i = blockIdx.x*blockDim.x + tid;
size_t gridSize = blockDim.x*gridDim.x;
float mySum = 0;
float value=0;
while (i < n) {
value=g_idata[i]; //avoid reading twice
mySum += value*value;
i += gridSize;
}
sdata[tid] = mySum;
__syncthreads();
if (tid < 512)
sdata[tid] += sdata[tid + 512];
__syncthreads();
if (tid < 256)
sdata[tid] += sdata[tid + 256];
__syncthreads();
if (tid < 128)
sdata[tid] += sdata[tid + 128];
__syncthreads();
if (tid < 64)
sdata[tid] += sdata[tid + 64];
__syncthreads();
#if (__CUDA_ARCH__ >= 300)
if ( tid < 32 )
{
mySum = sdata[tid] + sdata[tid + 32];
for (int offset = warpSize/2; offset > 0; offset /= 2) {
mySum += __shfl_down(mySum, offset);
}
}
#else
if (tid < 32) {
warpReduce(sdata, tid);
mySum = sdata[0];
}
#endif
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
__global__ void reduceSum(float *g_idata, float *g_odata, size_t n){
extern __shared__ volatile float sdata[];
//http://stackoverflow.com/a/35133396/1485872
size_t tid = threadIdx.x;
size_t i = blockIdx.x*blockDim.x + tid;
size_t gridSize = blockDim.x*gridDim.x;
float mySum = 0;
// float value=0;
while (i < n) {
mySum += g_idata[i];
i += gridSize;
}
sdata[tid] = mySum;
__syncthreads();
if (tid < 512)
sdata[tid] += sdata[tid + 512];
__syncthreads();
if (tid < 256)
sdata[tid] += sdata[tid + 256];
__syncthreads();
if (tid < 128)
sdata[tid] += sdata[tid + 128];
__syncthreads();
if (tid < 64)
sdata[tid] += sdata[tid + 64];
__syncthreads();
#if (__CUDA_ARCH__ >= 300)
if ( tid < 32 )
{
mySum = sdata[tid] + sdata[tid + 32];
for (int offset = warpSize/2; offset > 0; offset /= 2) {
mySum += __shfl_down(mySum, offset);
}
}
#else
if (tid < 32) {
warpReduce(sdata, tid);
mySum = sdata[0];
}
#endif
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
// main function
void pocs_tv(const float* img,float* dst,float alpha,const long* image_size, int maxIter){
size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ;
size_t mem_size = sizeof(float) * total_pixels;
float *d_image, *d_dimgTV,*d_norm2aux,*d_norm2;
// memory for image
hipMalloc(&d_image, mem_size);
cudaCheckErrors("Malloc Image error");
hipMemcpy(d_image, img, mem_size, hipMemcpyHostToDevice);
cudaCheckErrors("Memory Malloc and Memset: SRC");
// memory for df
hipMalloc(&d_dimgTV, mem_size);
cudaCheckErrors("Memory Malloc and Memset: TV");
hipMalloc(&d_norm2, mem_size);
cudaCheckErrors("Memory Malloc and Memset: TV");
// memory for L2norm auxiliar
hipMalloc(&d_norm2aux, sizeof(float)*(total_pixels + MAXTHREADS - 1) / MAXTHREADS);
cudaCheckErrors("Memory Malloc and Memset: NORMAux");
// For the gradient
dim3 blockGrad(10, 10, 10);
dim3 gridGrad((image_size[0]+blockGrad.x-1)/blockGrad.x, (image_size[1]+blockGrad.y-1)/blockGrad.y, (image_size[2]+blockGrad.z-1)/blockGrad.z);
// For the reduction
float sumnorm2;
for(unsigned int i=0;i<maxIter;i++){
// Compute the gradient of the TV norm
hipLaunchKernelGGL(( gradientTV), dim3(gridGrad), dim3(blockGrad), 0, 0, d_image,d_dimgTV,image_size[2], image_size[1],image_size[0]);
cudaCheckErrors("Gradient");
// hipMemcpy(dst, d_dimgTV, mem_size, hipMemcpyDeviceToHost);
hipMemcpy(d_norm2, d_dimgTV, mem_size, hipMemcpyDeviceToDevice);
cudaCheckErrors("Copy from gradient call error");
// Compute the L2 norm of the gradint. For that, reduction is used.
//REDUCE
size_t dimblockRed = MAXTHREADS;
size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
reduceNorm2 << <dimgridRed, dimblockRed, MAXTHREADS*sizeof(float) >> >(d_norm2, d_norm2aux, total_pixels);
cudaCheckErrors("reduce1");
if (dimgridRed > 1) {
reduceSum << <1, dimblockRed, MAXTHREADS*sizeof(float) >> >(d_norm2aux, d_norm2, dimgridRed);
cudaCheckErrors("reduce2");
hipMemcpy(&sumnorm2, d_norm2, sizeof(float), hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy");
}
else {
hipMemcpy(&sumnorm2, d_norm2aux, sizeof(float), hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy");
}
//mexPrintf("%f ",sqrt(sumnorm2));
//NORMALIZE
//in a Tesla, maximum blocks =15 SM * 4 blocks/SM
hipLaunchKernelGGL(( divideArrayScalar) , dim3(60),dim3(MAXTHREADS), 0, 0, d_dimgTV,sqrt(sumnorm2),total_pixels);
cudaCheckErrors("Division error");
//MULTIPLY HYPERPARAMETER
hipLaunchKernelGGL(( multiplyArrayScalar), dim3(60),dim3(MAXTHREADS), 0, 0, d_dimgTV,alpha, total_pixels);
cudaCheckErrors("Multiplication error");
//SUBSTRACT GRADIENT
hipLaunchKernelGGL(( substractArrays) , dim3(60),dim3(MAXTHREADS), 0, 0, d_image,d_dimgTV, total_pixels);
cudaCheckErrors("Substraction error");
sumnorm2=0;
}
cudaCheckErrors("TV minimization");
hipMemcpy(dst, d_image, mem_size, hipMemcpyDeviceToHost);
cudaCheckErrors("Copy result back");
hipFree(d_image);
hipFree(d_norm2aux);
hipFree(d_dimgTV);
hipFree(d_norm2);
cudaCheckErrors("Memory free");
hipDeviceReset();
}
| 678fab1d2e8bbc9fdd1c9e2f82e7879c5eea6578.cu | /*-------------------------------------------------------------------------
*
* CUDA functions for Steepest descent in POCS-type algorithms.
*
* This file will iteratively minimize by steepest descent the total variation
* of the input image, with the parameters given, using GPUs.
*
* CODE by Ander Biguri
*
---------------------------------------------------------------------------
---------------------------------------------------------------------------
Copyright (c) 2015, University of Bath and CERN- European Organization for
Nuclear Research
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
Contact: [email protected]
Codes : https://github.com/CERN/TIGRE
---------------------------------------------------------------------------
*/
#define MAXTHREADS 1024
#include "POCS_TV.hpp"
#include <stdio.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
printf("%s \n",msg);\
printf("CBCT:CUDA:Atb",cudaGetErrorString(__err));\
} \
} while (0)
// CUDA kernels
//https://stackoverflow.com/questions/21332040/simple-cuda-kernel-optimization/21340927#21340927
__global__ void divideArrayScalar(float* vec,float scalar,const size_t n)
{
unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x;
for(; i<n; i+=gridDim.x*blockDim.x) {
vec[i]/=scalar;
}
}
__global__ void multiplyArrayScalar(float* vec,float scalar,const size_t n)
{
unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x;
for(; i<n; i+=gridDim.x*blockDim.x) {
vec[i]*=scalar;
}
}
__global__ void substractArrays(float* vec,float* vec2,const size_t n)
{
unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x;
for(; i<n; i+=gridDim.x*blockDim.x) {
vec[i]-=vec2[i];
}
}
__device__ __inline__
void gradient(const float* u, float* grad,
long z, long y, long x,
long depth, long rows, long cols)
{
unsigned long size2d = rows*cols;
// neighbour queries at x+1, y+1 or z+1 may fall outside the volume; leave grad at zero and avoid an out-of-bounds read
if ( x >= cols || y >= rows || z >= depth ) return;
unsigned long long idx = z * size2d + y * cols + x;
float uidx = u[idx];
if ( z - 1 >= 0 && z<depth) {
grad[0] = (uidx-u[(z-1)*size2d + y*cols + x]) ;
}
if ( y - 1 >= 0 && y<rows){
grad[1] = (uidx-u[z*size2d + (y-1)*cols + x]) ;
}
if ( x - 1 >= 0 && x<cols) {
grad[2] = (uidx-u[z*size2d + y*cols + (x-1)]);
}
}
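// One thread per voxel: evaluates the gradient of the TV functional from the backward differences at
// (x,y,z) and at its three forward neighbours (x+1,y,z), (x,y+1,z) and (x,y,z+1).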
__global__ void gradientTV(const float* f, float* dftv,
long depth, long rows, long cols){
unsigned long x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned long y = threadIdx.y + blockIdx.y * blockDim.y;
unsigned long z = threadIdx.z + blockIdx.z * blockDim.z;
unsigned long long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float df[3] ={0,0,0};
float dfi[3]={0,0,0}; // dfi== \partial f_{i+1,j,k}
float dfj[3]={0,0,0};
float dfk[3]={0,0,0};
gradient(f,df ,z ,y ,x , depth,rows,cols);
gradient(f,dfi ,z ,y ,x+1, depth,rows,cols);
gradient(f,dfj ,z ,y+1,x , depth,rows,cols);
gradient(f,dfk ,z+1,y ,x , depth,rows,cols);
float eps=0.00000001; // avoid division by zero
dftv[idx]=(df[0]+df[1]+df[2])/(sqrt(df[0] *df[0] +df[1] *df[1] +df[2] *df[2])+eps)
-dfi[2]/(sqrt(dfi[0]*dfi[0]+dfi[1]*dfi[1]+dfi[2]*dfi[2]) +eps) // I wish I could precompute this, but if I do then I'd need to recompute the gradient.
-dfj[1]/(sqrt(dfj[0]*dfj[0]+dfj[1]*dfj[1]+dfj[2]*dfj[2]) +eps)
-dfk[0]/(sqrt(dfk[0]*dfk[0]+dfk[1]*dfk[1]+dfk[2]*dfk[2]) +eps);
}
__device__ void warpReduce(volatile float *sdata, size_t tid) {
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
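// Block-wide sum of squares: each thread accumulates a grid-stride partial sum of g_idata[i]^2, the block
// then reduces it in shared memory (the first reduction step assumes blockDim.x == MAXTHREADS) and thread 0
// writes one partial result per block to g_odata.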
__global__ void reduceNorm2(float *g_idata, float *g_odata, size_t n){
extern __shared__ volatile float sdata[];
//http://stackoverflow.com/a/35133396/1485872
size_t tid = threadIdx.x;
size_t i = blockIdx.x*blockDim.x + tid;
size_t gridSize = blockDim.x*gridDim.x;
float mySum = 0;
float value=0;
while (i < n) {
value=g_idata[i]; //avoid reading twice
mySum += value*value;
i += gridSize;
}
sdata[tid] = mySum;
__syncthreads();
if (tid < 512)
sdata[tid] += sdata[tid + 512];
__syncthreads();
if (tid < 256)
sdata[tid] += sdata[tid + 256];
__syncthreads();
if (tid < 128)
sdata[tid] += sdata[tid + 128];
__syncthreads();
if (tid < 64)
sdata[tid] += sdata[tid + 64];
__syncthreads();
#if (__CUDA_ARCH__ >= 300)
if ( tid < 32 )
{
mySum = sdata[tid] + sdata[tid + 32];
for (int offset = warpSize/2; offset > 0; offset /= 2) {
mySum += __shfl_down(mySum, offset);
}
}
#else
if (tid < 32) {
warpReduce(sdata, tid);
mySum = sdata[0];
}
#endif
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
__global__ void reduceSum(float *g_idata, float *g_odata, size_t n){
extern __shared__ volatile float sdata[];
//http://stackoverflow.com/a/35133396/1485872
size_t tid = threadIdx.x;
size_t i = blockIdx.x*blockDim.x + tid;
size_t gridSize = blockDim.x*gridDim.x;
float mySum = 0;
// float value=0;
while (i < n) {
mySum += g_idata[i];
i += gridSize;
}
sdata[tid] = mySum;
__syncthreads();
if (tid < 512)
sdata[tid] += sdata[tid + 512];
__syncthreads();
if (tid < 256)
sdata[tid] += sdata[tid + 256];
__syncthreads();
if (tid < 128)
sdata[tid] += sdata[tid + 128];
__syncthreads();
if (tid < 64)
sdata[tid] += sdata[tid + 64];
__syncthreads();
#if (__CUDA_ARCH__ >= 300)
if ( tid < 32 )
{
mySum = sdata[tid] + sdata[tid + 32];
for (int offset = warpSize/2; offset > 0; offset /= 2) {
mySum += __shfl_down(mySum, offset);
}
}
#else
if (tid < 32) {
warpReduce(sdata, tid);
mySum = sdata[0];
}
#endif
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
// main function
void pocs_tv(const float* img,float* dst,float alpha,const long* image_size, int maxIter){
size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ;
size_t mem_size = sizeof(float) * total_pixels;
float *d_image, *d_dimgTV,*d_norm2aux,*d_norm2;
// memory for image
cudaMalloc(&d_image, mem_size);
cudaCheckErrors("Malloc Image error");
cudaMemcpy(d_image, img, mem_size, cudaMemcpyHostToDevice);
cudaCheckErrors("Memory Malloc and Memset: SRC");
// memory for df
cudaMalloc(&d_dimgTV, mem_size);
cudaCheckErrors("Memory Malloc and Memset: TV");
cudaMalloc(&d_norm2, mem_size);
cudaCheckErrors("Memory Malloc and Memset: TV");
// memory for L2norm auxiliar
cudaMalloc(&d_norm2aux, sizeof(float)*(total_pixels + MAXTHREADS - 1) / MAXTHREADS);
cudaCheckErrors("Memory Malloc and Memset: NORMAux");
// For the gradient
dim3 blockGrad(10, 10, 10);
dim3 gridGrad((image_size[0]+blockGrad.x-1)/blockGrad.x, (image_size[1]+blockGrad.y-1)/blockGrad.y, (image_size[2]+blockGrad.z-1)/blockGrad.z);
// For the reduction
float sumnorm2;
for(unsigned int i=0;i<maxIter;i++){
// Compute the gradient of the TV norm
gradientTV<<<gridGrad, blockGrad>>>(d_image,d_dimgTV,image_size[2], image_size[1],image_size[0]);
cudaCheckErrors("Gradient");
// cudaMemcpy(dst, d_dimgTV, mem_size, cudaMemcpyDeviceToHost);
cudaMemcpy(d_norm2, d_dimgTV, mem_size, cudaMemcpyDeviceToDevice);
cudaCheckErrors("Copy from gradient call error");
// Compute the L2 norm of the gradint. For that, reduction is used.
//REDUCE
size_t dimblockRed = MAXTHREADS;
size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
reduceNorm2 << <dimgridRed, dimblockRed, MAXTHREADS*sizeof(float) >> >(d_norm2, d_norm2aux, total_pixels);
cudaCheckErrors("reduce1");
if (dimgridRed > 1) {
reduceSum << <1, dimblockRed, MAXTHREADS*sizeof(float) >> >(d_norm2aux, d_norm2, dimgridRed);
cudaCheckErrors("reduce2");
cudaMemcpy(&sumnorm2, d_norm2, sizeof(float), cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy");
}
else {
cudaMemcpy(&sumnorm2, d_norm2aux, sizeof(float), cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy");
}
//mexPrintf("%f ",sqrt(sumnorm2));
//NORMALIZE
//in a Tesla, maximum blocks =15 SM * 4 blocks/SM
divideArrayScalar <<<60,MAXTHREADS>>>(d_dimgTV,sqrt(sumnorm2),total_pixels);
cudaCheckErrors("Division error");
//MULTIPLY HYPERPARAMETER
multiplyArrayScalar<<<60,MAXTHREADS>>>(d_dimgTV,alpha, total_pixels);
cudaCheckErrors("Multiplication error");
//SUBSTRACT GRADIENT
substractArrays <<<60,MAXTHREADS>>>(d_image,d_dimgTV, total_pixels);
cudaCheckErrors("Substraction error");
sumnorm2=0;
}
cudaCheckErrors("TV minimization");
cudaMemcpy(dst, d_image, mem_size, cudaMemcpyDeviceToHost);
cudaCheckErrors("Copy result back");
cudaFree(d_image);
cudaFree(d_norm2aux);
cudaFree(d_dimgTV);
cudaFree(d_norm2);
cudaCheckErrors("Memory free");
cudaDeviceReset();
}
|
8fd623c2e1127fd0ebc01b9b5fae2c0606844d6f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Based on CUDA SDK template from NVIDIA
// sgm algorithm adapted from http://lunokhod.org/?p=1403
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h>
#include <limits>
#include <algorithm>
#include <climits>
// includes, project
#include <cutil_inline.h>
#define MMAX_BRIGHTNESS 255
#define PENALTY1 15
#define PENALTY2 100
#define COSTS(i,j,d) costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define ACCUMULATED_COSTS(i,j,d) accumulated_costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define LEFT_IMAGE(i,j) left_image[(i)+(j)*nx]
#define RIGHT_IMAGE(i,j) right_image[(i)+(j)*nx]
#define DISP_IMAGE(i,j) disp_image[(i)+(j)*nx]
#define MMAX(a,b) (((a)>(b))?(a):(b))
#define MMIN(a,b) (((a)<(b))?(a):(b))
/* function headers */
void determine_costs(const int *left_image, const int *right_image, int *costs,
const int nx, const int ny, const int disp_range);
void evaluate_path( const int *prior, const int* local,
int path_intensity_gradient, int *curr_cost,
const int nx, const int ny, const int disp_range );
__device__ void d_evaluate_path( const int *prior, const int* local,
int path_intensity_gradient, int *curr_cost,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirxpos(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range ) ;
void inplace_sum_views( int * im1, const int * im2,
const int nx, const int ny, const int disp_range ) ;
int find_min_index( const int *v, const int dist_range ) ;
__device__ int d_find_min_index( const int *v, const int dist_range ) ;
void create_disparity_view( const int *accumulated_costs , int * disp_image, int nx, int ny) ;
void sgmHost( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range );
void sgmDevice( const int *h_leftIm, const int *h_rightIm,
int *h_dispImD,
const int w, const int h, const int disp_range );
void usage(char *command);
/* Kernels */
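// One thread per (pixel, disparity) pair: the matching cost is the absolute intensity difference between
// the left pixel and the right-image pixel d columns to the left (255 where that pixel falls outside the image).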
__global__
void d_determine_costs( const int *left_image, const int *right_image, int *costs,
const int nx, const int ny, const int disp_range) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int d = threadIdx.z;
//COSTS(i,j,d) = 255u;
if(j < ny && i < nx && d < disp_range) {
COSTS(i,j,d) = 255u;
if(i >= d) {
COSTS(i,j,d) = abs( LEFT_IMAGE(i,j) - RIGHT_IMAGE(i-d,j) );
}
}
}
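// The d_iterate_direction_* kernels mirror the host iterate_direction_* routines: pixels on the starting
// border of the scan direction are seeded with the raw matching costs, all other pixels are updated with
// d_evaluate_path from the previously visited neighbour along the path.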
__global__
void d_iterate_direction_dirxpos(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int d = threadIdx.z;
const int WIDTH = nx;
const int HEIGHT = ny;
if(j < HEIGHT && i < WIDTH)
if(i==0){
if(d < disp_range)
ACCUMULATED_COSTS(0,j,d) += COSTS(0,j,d);
} else {
d_evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)) ,
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range);
}
}
__global__
void d_iterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int d = threadIdx.z;
const int WIDTH = nx;
const int HEIGHT = ny;
if(i < WIDTH && j < HEIGHT) {
if(j == 0) {
if(d < disp_range)
ACCUMULATED_COSTS(i,0,d) += COSTS(i,0,d);
}
else {
d_evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
/*
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = 0; j < HEIGHT; j++ ) {
if(j==0) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,0,d) += COSTS(i,0,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}*/
}
__global__
void d_iterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int d = threadIdx.z;
const int WIDTH = nx;
const int HEIGHT = ny;
if(j < HEIGHT && i < WIDTH){
if(i == WIDTH-1){
if(d < disp_range) {
ACCUMULATED_COSTS(WIDTH-1,j,d) += COSTS(WIDTH-1,j,d);
}
}
else {
d_evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
/*
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = WIDTH-1; i >= 0; i-- ) {
if(i==WIDTH-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(WIDTH-1,j,d) += COSTS(WIDTH-1,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}*/
}
__global__
void d_iterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int d = threadIdx.z;
const int WIDTH = nx;
const int HEIGHT = ny;
if(i < WIDTH && j < HEIGHT) {
if(j == HEIGHT-1) {
ACCUMULATED_COSTS(i,HEIGHT-1,d) += COSTS(i,HEIGHT-1,d);
}
else {
d_evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0) , nx, ny, disp_range);
}
}
/*
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = HEIGHT-1; j >= 0; j-- ) {
if(j==HEIGHT-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,HEIGHT-1,d) += COSTS(i,HEIGHT-1,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0) , nx, ny, disp_range);
}
}
}
*/
}
/* functions code */
void determine_costs(const int *left_image, const int *right_image, int *costs,
const int nx, const int ny, const int disp_range)
{
std::fill(costs, costs+nx*ny*disp_range, 255u);
for ( int j = 0; j < ny; j++ ) {
for ( int d = 0; d < disp_range; d++ ) {
for ( int i = d; i < nx; i++ ) {
COSTS(i,j,d) = abs( LEFT_IMAGE(i,j) - RIGHT_IMAGE(i-d,j) );
}
}
}
}
void iterate_direction_dirxpos(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = 0; i < WIDTH; i++ ) {
if(i==0) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(0,j,d) += COSTS(0,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)) ,
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range);
}
}
}
}
void iterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = 0; j < HEIGHT; j++ ) {
if(j==0) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,0,d) += COSTS(i,0,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
void iterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = WIDTH-1; i >= 0; i-- ) {
if(i==WIDTH-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(WIDTH-1,j,d) += COSTS(WIDTH-1,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
void iterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = HEIGHT-1; j >= 0; j-- ) {
if(j==HEIGHT-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,HEIGHT-1,d) += COSTS(i,HEIGHT-1,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0) , nx, ny, disp_range);
}
}
}
}
void iterate_direction( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
// Walk along the edges in a clockwise fashion
if ( dirx > 0 ) {
// LEFT MOST EDGE
// Process every pixel along this edge
iterate_direction_dirxpos(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry > 0 ) {
// TOP MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the top left most pixel
iterate_direction_dirypos(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( dirx < 0 ) {
// RIGHT MOST EDGE
// Process every pixel along this edge only if diry ==
// 0. Otherwise skip the top right most pixel
iterate_direction_dirxneg(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry < 0 ) {
// BOTTOM MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the bottom left and bottom right pixel
iterate_direction_diryneg(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
}
void d_iterate_direction( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range, dim3* grid, dim3* block )
{
// Walk along the edges in a clockwise fashion
if ( dirx > 0 ) {
// LEFT MOST EDGE
// Process every pixel along this edge
hipLaunchKernelGGL(( d_iterate_direction_dirxpos), dim3(*grid), dim3(*block) , 0, 0, dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry > 0 ) {
// TOP MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the top left most pixel
hipLaunchKernelGGL(( d_iterate_direction_dirypos), dim3(*grid), dim3(*block) , 0, 0, diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( dirx < 0 ) {
// RIGHT MOST EDGE
// Process every pixel along this edge only if diry ==
// 0. Otherwise skip the top right most pixel
hipLaunchKernelGGL(( d_iterate_direction_dirxneg), dim3(*grid), dim3(*block) , 0, 0, dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry < 0 ) {
// BOTTOM MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the bottom left and bottom right pixel
hipLaunchKernelGGL(( d_iterate_direction_diryneg), dim3(*grid), dim3(*block) , 0, 0, diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
}
// ADD two cost images
void inplace_sum_views( int * im1, const int * im2,
const int nx, const int ny, const int disp_range )
{
int *im1_init = im1;
while ( im1 != (im1_init + (nx*ny*disp_range)) ) {
*im1 += *im2;
im1++;
im2++;
}
}
// ADD two cost images
__global__ void d_inplace_sum_views( int * im1, const int * im2,
const int nx, const int ny, const int disp_range )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int d = threadIdx.z;
int current_idx = ((i)*disp_range+(j)*nx*disp_range+(d));
if(j < ny && i < nx && d < disp_range) {
im1[current_idx] += im2[current_idx];
}
/*int *im1_init = im1;
if(im1 != (im1_init + (nx*ny*disp_range))) {
*im1 += *im2;
im1++;
im2++;
}*/
}
__device__ int d_find_min_index( const int *v, const int disp_range )
{
int min = INT_MAX;
int minind = -1;
for (int d=0; d < disp_range; d++) {
if(v[d]<min) {
min = v[d];
minind = d;
}
}
return minind;
}
int find_min_index( const int *v, const int disp_range )
{
int min = std::numeric_limits<int>::max();
int minind = -1;
for (int d=0; d < disp_range; d++) {
if(v[d]<min) {
min = v[d];
minind = d;
}
}
return minind;
}
__device__ void d_evaluate_path(const int *prior, const int *local,
int path_intensity_gradient, int *curr_cost ,
const int nx, const int ny, const int disp_range)
{
memcpy(curr_cost, local, sizeof(int)*disp_range);
for ( int d = 0; d < disp_range; d++ ) {
int e_smooth = INT_MAX;
for ( int d_p = 0; d_p < disp_range; d_p++ ) {
if ( d_p - d == 0 ) {
// No penalty
e_smooth = MMIN(e_smooth,prior[d_p]);
} else if ( abs(d_p - d) == 1 ) {
// Small penalty
e_smooth = MMIN(e_smooth,prior[d_p]+PENALTY1);
} else {
// Large penalty
e_smooth =
MMIN(e_smooth,prior[d_p] +
MMAX(PENALTY1,
path_intensity_gradient ? PENALTY2/path_intensity_gradient : PENALTY2));
}
}
curr_cost[d] += e_smooth;
}
int min = INT_MAX;
for ( int d = 0; d < disp_range; d++ ) {
if (prior[d]<min) min=prior[d];
}
for ( int d = 0; d < disp_range; d++ ) {
curr_cost[d]-=min;
}
}
void evaluate_path(const int *prior, const int *local,
int path_intensity_gradient, int *curr_cost ,
const int nx, const int ny, const int disp_range)
{
memcpy(curr_cost, local, sizeof(int)*disp_range);
for ( int d = 0; d < disp_range; d++ ) {
int e_smooth = std::numeric_limits<int>::max();
for ( int d_p = 0; d_p < disp_range; d_p++ ) {
if ( d_p - d == 0 ) {
// No penalty
e_smooth = MMIN(e_smooth,prior[d_p]);
} else if ( abs(d_p - d) == 1 ) {
// Small penalty
e_smooth = MMIN(e_smooth,prior[d_p]+PENALTY1);
} else {
// Large penalty
e_smooth =
MMIN(e_smooth,prior[d_p] +
MMAX(PENALTY1,
path_intensity_gradient ? PENALTY2/path_intensity_gradient : PENALTY2));
}
}
curr_cost[d] += e_smooth;
}
int min = std::numeric_limits<int>::max();
for ( int d = 0; d < disp_range; d++ ) {
if (prior[d]<min) min=prior[d];
}
for ( int d = 0; d < disp_range; d++ ) {
curr_cost[d]-=min;
}
}
void create_disparity_view( const int *accumulated_costs , int * disp_image,
const int nx, const int ny, const int disp_range)
{
for ( int j = 0; j < ny; j++ ) {
for ( int i = 0; i < nx; i++ ) {
DISP_IMAGE(i,j) =
4 * find_min_index( &ACCUMULATED_COSTS(i,j,0), disp_range );
}
}
}
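// One thread per pixel: writes 4 times the index of the minimum accumulated cost over the disparity axis.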
__global__
void d_create_disparity_view( const int *accumulated_costs , int * disp_image,
const int nx, const int ny, const int disp_range)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(j < ny && i < nx)
DISP_IMAGE(i,j) = 4 * d_find_min_index( &ACCUMULATED_COSTS(i,j,0), disp_range );
}
/*
* Links:
* http://www.dlr.de/rmc/rm/en/desktopdefault.aspx/tabid-9389/16104_read-39811/
* http://lunokhod.org/?p=1356
*/
// sgm code to run on the host
void sgmHost( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range)
{
const int nx = w;
const int ny = h;
// Processing all costs. W*H*D. D= disp_range
int *costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
determine_costs(h_leftIm, h_rightIm, costs, nx, ny, disp_range);
/*
// check if everything is ok
printf("HOST\n");
for(int r = 0; r < nx*ny*disp_range; r++) {
printf("%d\t", costs[r]);
printf("%d\n", r);
} */
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (accumulated_costs == NULL || dir_accumulated_costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
int dirx=0,diry=0;
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
dirx=0;
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
free(costs);
free(dir_accumulated_costs);
create_disparity_view( accumulated_costs, h_dispIm, nx, ny, disp_range );
free(accumulated_costs);
}
// sgm code to run on the GPU
void sgmDevice( const int *h_leftIm, const int *h_rightIm,
int *h_dispImD,
const int w, const int h, const int disp_range )
{
const int nx = w;
const int ny = h;
const int image_size = nx * ny * sizeof(int); // image size in bytes
const int costs_block_dim = image_size * disp_range; // costs block volume
int *d_leftIm;
int *d_rightIm;
int *d_costs;
int *d_dispImD;
// Processing all costs. W*H*D. D= disp_range
int *costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
hipMalloc((void **) &d_leftIm, image_size);
hipMalloc((void **) &d_rightIm, image_size);
hipMalloc((void **) &d_costs, costs_block_dim);
hipMalloc((void **) &d_dispImD, image_size);
hipMemcpy(d_leftIm, h_leftIm, image_size, hipMemcpyHostToDevice);
hipMemcpy(d_rightIm, h_rightIm, image_size, hipMemcpyHostToDevice);
int block_z = disp_range;
int block_x, block_y;
if(nx > ny) { // wider image
block_y = (int) ceil((float) nx / ny); // float division so the aspect ratio is not truncated before ceil
block_x = floor(float(512/disp_range));
}
else if(ny > nx) { // higher image
block_y = floor(float(512/disp_range));
block_x = (int) ceil((float) nx / ny); // float division keeps block_x at least 1 for tall images
}
else { // square image
block_y = floor(float(sqrt(floor(float(512/disp_range)))));
block_x = block_y;
}
int grid_x = ceil((float) nx / block_x);
int grid_y = ceil((float) ny / block_y);
/*printf("\nnx: %d", nx);
printf("\nny: %d", ny);
printf("\nblock_x: %d", block_x);
printf("\nblock_y: %d", block_y);
printf("\nblock_z: %d", block_z);
printf("\ngrid_x: %d", grid_x);
printf("\ngrid_y: %d", grid_y);*/
dim3 block(block_x, block_y, block_z);
dim3 grid(grid_x, grid_y);
hipLaunchKernelGGL(( d_determine_costs) , dim3(grid), dim3(block) , 0, 0, d_leftIm, d_rightIm, d_costs, nx, ny, disp_range);
hipMemcpy(costs, d_costs, costs_block_dim, hipMemcpyDeviceToHost);
// check if everything is ok
/*printf("DEVICE\n");
for(int r = 0; r < costs_block_dim/sizeof(int); r++) {
printf("%d\t", costs[r]);
printf("%d\n", r);
}*/
int *d_accumulated_costs;
int *d_dir_accumulated_costs;
hipMalloc((void **) &d_accumulated_costs, costs_block_dim);
hipMalloc((void **) &d_dir_accumulated_costs, costs_block_dim);
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (accumulated_costs == NULL || dir_accumulated_costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
int dirx=0,diry=0;
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
hipMemcpy(d_dir_accumulated_costs, dir_accumulated_costs, costs_block_dim, hipMemcpyHostToDevice);
hipMemcpy(d_accumulated_costs, accumulated_costs, costs_block_dim, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( d_inplace_sum_views), dim3(grid), dim3(block) , 0, 0, d_accumulated_costs, d_dir_accumulated_costs, nx, ny, disp_range);
hipMemcpy(accumulated_costs, d_accumulated_costs, costs_block_dim, hipMemcpyDeviceToHost);
hipMemcpy(dir_accumulated_costs, d_dir_accumulated_costs, costs_block_dim, hipMemcpyDeviceToHost);
}
dirx=0;
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
hipMemcpy(d_accumulated_costs, accumulated_costs, costs_block_dim, hipMemcpyHostToDevice);
hipMemcpy(d_dir_accumulated_costs, dir_accumulated_costs, costs_block_dim, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( d_inplace_sum_views) , dim3(grid), dim3(block) , 0, 0, d_accumulated_costs, d_dir_accumulated_costs, nx, ny, disp_range);
hipMemcpy(accumulated_costs, d_accumulated_costs, costs_block_dim, hipMemcpyDeviceToHost);
hipMemcpy(dir_accumulated_costs, d_dir_accumulated_costs, costs_block_dim, hipMemcpyDeviceToHost);
}
hipFree(d_leftIm);
hipFree(d_rightIm);
free(costs);
free(dir_accumulated_costs);
hipFree(d_dir_accumulated_costs);
hipMemcpy(d_accumulated_costs, accumulated_costs, costs_block_dim, hipMemcpyHostToDevice);
hipMemcpy(d_dispImD, h_dispImD, image_size, hipMemcpyHostToDevice );
dim3 block_im(block_x, block_y);
hipLaunchKernelGGL(( d_create_disparity_view) , dim3(grid), dim3(block_im) , 0, 0, d_accumulated_costs, d_dispImD, nx, ny, disp_range );
hipMemcpy(h_dispImD, d_dispImD, image_size, hipMemcpyDeviceToHost);
free(accumulated_costs);
hipFree(d_accumulated_costs);
hipFree(d_dispImD);
}
// print command line format
void usage(char *command)
{
printf("Usage: %s [-h] [-d device] [-l leftimage] [-r rightimage] [-o dev_dispimage] [-t host_dispimage] [-p disprange] \n",command);
}
// main
int main( int argc, char** argv)
{
// default command line options
int deviceId = 0;
int disp_range = 32;
char *leftIn =(char *)"lbull.pgm",
*rightIn =(char *)"rbull.pgm",
*fileOut =(char *)"d_dbull.pgm",
*referenceOut=(char *)"h_dbull.pgm";
// parse command line arguments
int opt;
while( (opt = getopt(argc,argv,"d:l:o:r:t:p:h")) !=-1)
{
switch(opt)
{
case 'd': // device
if(sscanf(optarg,"%d",&deviceId)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'l': // left image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
leftIn = strdup(optarg);
break;
case 'r': // right image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
rightIn = strdup(optarg);
break;
case 'o': // output image (from device) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
fileOut = strdup(optarg);
break;
case 't': // output image (from host) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
referenceOut = strdup(optarg);
break;
case 'p': // disp_range
if(sscanf(optarg,"%d",&disp_range)==0)
{
usage(argv[0]);
exit(1);
}
break;
case 'h': // help
usage(argv[0]);
exit(0);
break;
}
}
if(optind < argc) {
fprintf(stderr,"Error in arguments\n");
usage(argv[0]);
exit(1);
}
// select cuda device
cutilSafeCall( hipSetDevice( deviceId ) );
// create events to measure host sgm time and device sgm time
hipEvent_t startH, stopH, startD, stopD;
hipEventCreate(&startH);
hipEventCreate(&stopH);
hipEventCreate(&startD);
hipEventCreate(&stopD);
// allocate host memory
int* h_ldata=NULL;
int* h_rdata=NULL;
unsigned int h,w;
//load left pgm
if (cutLoadPGMi(leftIn, (unsigned int **)&h_ldata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", leftIn);
exit(1);
}
//load right pgm
if (cutLoadPGMi(rightIn, (unsigned int **)&h_rdata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", rightIn);
exit(1);
}
// allocate mem for the result on host side
int* h_odata = (int*) malloc( h*w*sizeof(int));
int* reference = (int*) malloc( h*w*sizeof(int));
// sgm at host
hipEventRecord( startH, 0 );
sgmHost(h_ldata, h_rdata, reference, w, h, disp_range);
hipEventRecord( stopH, 0 );
hipEventSynchronize( stopH );
// sgm at GPU
hipEventRecord( startD, 0 );
sgmDevice(h_ldata, h_rdata, h_odata, w, h, disp_range);
hipEventRecord( stopD, 0 );
hipEventSynchronize( stopD );
// check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed");
float timeH, timeD;
hipEventElapsedTime( &timeH, startH, stopH );
printf( "Host processing time: %f (ms)\n", timeH);
hipEventElapsedTime( &timeD, startD, stopD );
printf( "Device processing time: %f (ms)\n", timeD);
// save output images
if (cutSavePGMi(referenceOut, (unsigned int *)reference, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", referenceOut);
exit(1);
}
if (cutSavePGMi(fileOut,(unsigned int *) h_odata, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", fileOut);
exit(1);
}
// cleanup memory
cutFree( h_ldata);
cutFree( h_rdata);
free( h_odata);
free( reference);
cutilDeviceReset();
}
| 8fd623c2e1127fd0ebc01b9b5fae2c0606844d6f.cu |
// Based on CUDA SDK template from NVIDIA
// sgm algorithm adapted from http://lunokhod.org/?p=1403
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h>
#include <limits>
#include <algorithm>
#include <climits>
// includes, project
#include <cutil_inline.h>
#define MMAX_BRIGHTNESS 255
#define PENALTY1 15
#define PENALTY2 100
#define COSTS(i,j,d) costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define ACCUMULATED_COSTS(i,j,d) accumulated_costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define LEFT_IMAGE(i,j) left_image[(i)+(j)*nx]
#define RIGHT_IMAGE(i,j) right_image[(i)+(j)*nx]
#define DISP_IMAGE(i,j) disp_image[(i)+(j)*nx]
#define MMAX(a,b) (((a)>(b))?(a):(b))
#define MMIN(a,b) (((a)<(b))?(a):(b))
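// Layout example for the cost-volume macros above (illustrative): with
// nx = 4 and disp_range = 2, COSTS(1,0,1) indexes costs[1*2 + 0*4*2 + 1] =
// costs[3], i.e. disparity d varies fastest, then column i, then row j.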
/* function headers */
void determine_costs(const int *left_image, const int *right_image, int *costs,
const int nx, const int ny, const int disp_range);
void evaluate_path( const int *prior, const int* local,
int path_intensity_gradient, int *curr_cost,
const int nx, const int ny, const int disp_range );
__device__ void d_evaluate_path( const int *prior, const int* local,
int path_intensity_gradient, int *curr_cost,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirxpos(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range ) ;
void inplace_sum_views( int * im1, const int * im2,
const int nx, const int ny, const int disp_range ) ;
int find_min_index( const int *v, const int dist_range ) ;
__device__ int d_find_min_index( const int *v, const int dist_range ) ;
void create_disparity_view( const int *accumulated_costs , int * disp_image, int nx, int ny) ;
void sgmHost( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range );
void sgmDevice( const int *h_leftIm, const int *h_rightIm,
int *h_dispImD,
const int w, const int h, const int disp_range );
void usage(char *command);
/* Kernels */
__global__
void d_determine_costs( const int *left_image, const int *right_image, int *costs,
const int nx, const int ny, const int disp_range) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int d = threadIdx.z;
//COSTS(i,j,d) = 255u;
if(j < ny && i < nx && d < disp_range) {
COSTS(i,j,d) = 255u;
if(i >= d) {
COSTS(i,j,d) = abs( LEFT_IMAGE(i,j) - RIGHT_IMAGE(i-d,j) );
}
}
}
__global__
void d_iterate_direction_dirxpos(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int d = threadIdx.z;
const int WIDTH = nx;
const int HEIGHT = ny;
if(j < HEIGHT && i < WIDTH)
if(i==0){
if(d < disp_range)
ACCUMULATED_COSTS(0,j,d) += COSTS(0,j,d);
} else {
d_evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)) ,
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range);
}
}
__global__
void d_iterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int d = threadIdx.z;
const int WIDTH = nx;
const int HEIGHT = ny;
if(i < WIDTH && j < HEIGHT) {
if(j == 0) {
if(d < disp_range)
ACCUMULATED_COSTS(i,0,d) += COSTS(i,0,d);
}
else {
d_evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
/*
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = 0; j < HEIGHT; j++ ) {
if(j==0) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,0,d) += COSTS(i,0,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}*/
}
__global__
void d_iterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int d = threadIdx.z;
const int WIDTH = nx;
const int HEIGHT = ny;
if(j < HEIGHT && i < WIDTH) {
if(i == WIDTH-1) {
if(d < disp_range) {
ACCUMULATED_COSTS(WIDTH-1,j,d) += COSTS(WIDTH-1,j,d);
}
}
else {
d_evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
/*
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = WIDTH-1; i >= 0; i-- ) {
if(i==WIDTH-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(WIDTH-1,j,d) += COSTS(WIDTH-1,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}*/
}
__global__
void d_iterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int d = threadIdx.z;
const int WIDTH = nx;
const int HEIGHT = ny;
if(i < WIDTH && j < HEIGHT) {
if(j == HEIGHT-1) {
ACCUMULATED_COSTS(i,HEIGHT-1,d) += COSTS(i,HEIGHT-1,d);
}
else {
d_evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0) , nx, ny, disp_range);
}
}
/*
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = HEIGHT-1; j >= 0; j-- ) {
if(j==HEIGHT-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,HEIGHT-1,d) += COSTS(i,HEIGHT-1,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0) , nx, ny, disp_range);
}
}
}
*/
}
/* functions code */
void determine_costs(const int *left_image, const int *right_image, int *costs,
const int nx, const int ny, const int disp_range)
{
std::fill(costs, costs+nx*ny*disp_range, 255u);
for ( int j = 0; j < ny; j++ ) {
for ( int d = 0; d < disp_range; d++ ) {
for ( int i = d; i < nx; i++ ) {
COSTS(i,j,d) = abs( LEFT_IMAGE(i,j) - RIGHT_IMAGE(i-d,j) );
}
}
}
}
void iterate_direction_dirxpos(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = 0; i < WIDTH; i++ ) {
if(i==0) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(0,j,d) += COSTS(0,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)) ,
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range);
}
}
}
}
void iterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = 0; j < HEIGHT; j++ ) {
if(j==0) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,0,d) += COSTS(i,0,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
void iterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = WIDTH-1; i >= 0; i-- ) {
if(i==WIDTH-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(WIDTH-1,j,d) += COSTS(WIDTH-1,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
void iterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = HEIGHT-1; j >= 0; j-- ) {
if(j==HEIGHT-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,HEIGHT-1,d) += COSTS(i,HEIGHT-1,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0) , nx, ny, disp_range);
}
}
}
}
void iterate_direction( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
// Walk along the edges in a clockwise fashion
if ( dirx > 0 ) {
// LEFT MOST EDGE
// Process every pixel along this edge
iterate_direction_dirxpos(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry > 0 ) {
// TOP MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the top left most pixel
iterate_direction_dirypos(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( dirx < 0 ) {
// RIGHT MOST EDGE
// Process every pixel along this edge only if diry ==
// 0. Otherwise skip the top right most pixel
iterate_direction_dirxneg(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry < 0 ) {
// BOTTOM MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the bottom left and bottom right pixel
iterate_direction_diryneg(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
}
void d_iterate_direction( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range, dim3* grid, dim3* block )
{
// Walk along the edges in a clockwise fashion
if ( dirx > 0 ) {
// LEFT MOST EDGE
// Process every pixel along this edge
d_iterate_direction_dirxpos<<< *grid, *block >>>(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry > 0 ) {
// TOP MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the top left most pixel
d_iterate_direction_dirypos<<< *grid, *block >>>(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( dirx < 0 ) {
// RIGHT MOST EDGE
// Process every pixel along this edge only if diry ==
// 0. Otherwise skip the top right most pixel
d_iterate_direction_dirxneg<<< *grid, *block >>>(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry < 0 ) {
// BOTTOM MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the bottom left and bottom right pixel
d_iterate_direction_diryneg<<< *grid, *block >>> (diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
}
// ADD two cost images
void inplace_sum_views( int * im1, const int * im2,
const int nx, const int ny, const int disp_range )
{
int *im1_init = im1;
while ( im1 != (im1_init + (nx*ny*disp_range)) ) {
*im1 += *im2;
im1++;
im2++;
}
}
// ADD two cost images
__global__ void d_inplace_sum_views( int * im1, const int * im2,
const int nx, const int ny, const int disp_range )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int d = threadIdx.z;
int current_idx = ((i)*disp_range+(j)*nx*disp_range+(d));
if(j < ny && i < nx && d < disp_range) {
im1[current_idx] += im2[current_idx];
}
/*int *im1_init = im1;
if(im1 != (im1_init + (nx*ny*disp_range))) {
*im1 += *im2;
im1++;
im2++;
}*/
}
__device__ int d_find_min_index( const int *v, const int disp_range )
{
int min = INT_MAX;
int minind = -1;
for (int d=0; d < disp_range; d++) {
if(v[d]<min) {
min = v[d];
minind = d;
}
}
return minind;
}
int find_min_index( const int *v, const int disp_range )
{
int min = std::numeric_limits<int>::max();
int minind = -1;
for (int d=0; d < disp_range; d++) {
if(v[d]<min) {
min = v[d];
minind = d;
}
}
return minind;
}
__device__ void d_evaluate_path(const int *prior, const int *local,
int path_intensity_gradient, int *curr_cost ,
const int nx, const int ny, const int disp_range)
{
memcpy(curr_cost, local, sizeof(int)*disp_range);
for ( int d = 0; d < disp_range; d++ ) {
int e_smooth = INT_MAX;
for ( int d_p = 0; d_p < disp_range; d_p++ ) {
if ( d_p - d == 0 ) {
// No penalty
e_smooth = MMIN(e_smooth,prior[d_p]);
} else if ( abs(d_p - d) == 1 ) {
// Small penalty
e_smooth = MMIN(e_smooth,prior[d_p]+PENALTY1);
} else {
// Large penalty
e_smooth =
MMIN(e_smooth,prior[d_p] +
MMAX(PENALTY1,
path_intensity_gradient ? PENALTY2/path_intensity_gradient : PENALTY2));
}
}
curr_cost[d] += e_smooth;
}
int min = INT_MAX;
for ( int d = 0; d < disp_range; d++ ) {
if (prior[d]<min) min=prior[d];
}
for ( int d = 0; d < disp_range; d++ ) {
curr_cost[d]-=min;
}
}
void evaluate_path(const int *prior, const int *local,
int path_intensity_gradient, int *curr_cost ,
const int nx, const int ny, const int disp_range)
{
memcpy(curr_cost, local, sizeof(int)*disp_range);
for ( int d = 0; d < disp_range; d++ ) {
int e_smooth = std::numeric_limits<int>::max();
for ( int d_p = 0; d_p < disp_range; d_p++ ) {
if ( d_p - d == 0 ) {
// No penalty
e_smooth = MMIN(e_smooth,prior[d_p]);
} else if ( abs(d_p - d) == 1 ) {
// Small penalty
e_smooth = MMIN(e_smooth,prior[d_p]+PENALTY1);
} else {
// Large penalty
e_smooth =
MMIN(e_smooth,prior[d_p] +
MMAX(PENALTY1,
path_intensity_gradient ? PENALTY2/path_intensity_gradient : PENALTY2));
}
}
curr_cost[d] += e_smooth;
}
int min = std::numeric_limits<int>::max();
for ( int d = 0; d < disp_range; d++ ) {
if (prior[d]<min) min=prior[d];
}
for ( int d = 0; d < disp_range; d++ ) {
curr_cost[d]-=min;
}
}
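// Worked example for the smoothness term above (illustrative): with
// PENALTY1 = 15 and PENALTY2 = 100, a path intensity gradient of 5 gives
// MMAX(15, 100/5) = 20, so large disparity jumps are penalised less across
// strong intensity edges than in flat regions, where the full 100 applies.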
void create_disparity_view( const int *accumulated_costs , int * disp_image,
const int nx, const int ny, const int disp_range)
{
for ( int j = 0; j < ny; j++ ) {
for ( int i = 0; i < nx; i++ ) {
DISP_IMAGE(i,j) =
4 * find_min_index( &ACCUMULATED_COSTS(i,j,0), disp_range );
}
}
}
__global__
void d_create_disparity_view( const int *accumulated_costs , int * disp_image,
const int nx, const int ny, const int disp_range)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(j < ny && i < nx)
DISP_IMAGE(i,j) = 4 * d_find_min_index( &ACCUMULATED_COSTS(i,j,0), disp_range );
}
/*
* Links:
* http://www.dlr.de/rmc/rm/en/desktopdefault.aspx/tabid-9389/16104_read-39811/
* http://lunokhod.org/?p=1356
*/
// sgm code to run on the host
void sgmHost( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range)
{
const int nx = w;
const int ny = h;
// Processing all costs. W*H*D. D= disp_range
int *costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
determine_costs(h_leftIm, h_rightIm, costs, nx, ny, disp_range);
/*
// check if everything is ok
printf("HOST\n");
for(int r = 0; r < nx*ny*disp_range; r++) {
printf("%d\t", costs[r]);
printf("%d\n", r);
} */
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (accumulated_costs == NULL || dir_accumulated_costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
int dirx=0,diry=0;
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
dirx=0;
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
free(costs);
free(dir_accumulated_costs);
create_disparity_view( accumulated_costs, h_dispIm, nx, ny, disp_range );
free(accumulated_costs);
}
// sgm code to run on the GPU
void sgmDevice( const int *h_leftIm, const int *h_rightIm,
int *h_dispImD,
const int w, const int h, const int disp_range )
{
const int nx = w;
const int ny = h;
const int image_size = nx * ny * sizeof(int); // image size in bytes
const int costs_block_dim = image_size * disp_range; // costs block volume
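// Illustrative footprint: for a 640 x 480 image with disp_range = 32,
// costs_block_dim = 640 * 480 * sizeof(int) * 32 = 39,321,600 bytes
// (about 37.5 MiB) per cost volume allocated below.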
int *d_leftIm;
int *d_rightIm;
int *d_costs;
int *d_dispImD;
// Processing all costs. W*H*D. D= disp_range
int *costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
cudaMalloc((void **) &d_leftIm, image_size);
cudaMalloc((void **) &d_rightIm, image_size);
cudaMalloc((void **) &d_costs, costs_block_dim);
cudaMalloc((void **) &d_dispImD, image_size);
cudaMemcpy(d_leftIm, h_leftIm, image_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_rightIm, h_rightIm, image_size, cudaMemcpyHostToDevice);
int block_z = disp_range;
int block_x, block_y;
if(nx > ny) { // wider image
block_y = ((int) ceil(float(nx/ny)));
block_x = floor(float(512/disp_range));
}
else if(ny > nx) { // higher image
block_y = floor(float(512/disp_range));
block_x = ((int) ceil(float(nx/ny)));
}
else { // square image
block_y = floor(float(sqrt(floor(float(512/disp_range)))));
block_x = block_y;
}
int grid_x = ceil((float) nx / block_x);
int grid_y = ceil((float) ny / block_y);
/*printf("\nnx: %d", nx);
printf("\nny: %d", ny);
printf("\nblock_x: %d", block_x);
printf("\nblock_y: %d", block_y);
printf("\nblock_z: %d", block_z);
printf("\ngrid_x: %d", grid_x);
printf("\ngrid_y: %d", grid_y);*/
dim3 block(block_x, block_y, block_z);
dim3 grid(grid_x, grid_y);
d_determine_costs <<< grid, block >>> (d_leftIm, d_rightIm, d_costs, nx, ny, disp_range);
cudaMemcpy(costs, d_costs, costs_block_dim, cudaMemcpyDeviceToHost);
// check if everything is ok
/*printf("DEVICE\n");
for(int r = 0; r < costs_block_dim/sizeof(int); r++) {
printf("%d\t", costs[r]);
printf("%d\n", r);
}*/
int *d_accumulated_costs;
int *d_dir_accumulated_costs;
cudaMalloc((void **) &d_accumulated_costs, costs_block_dim);
cudaMalloc((void **) &d_dir_accumulated_costs, costs_block_dim);
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (accumulated_costs == NULL || dir_accumulated_costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
int dirx=0,diry=0;
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
cudaMemcpy(d_dir_accumulated_costs, dir_accumulated_costs, costs_block_dim, cudaMemcpyHostToDevice);
cudaMemcpy(d_accumulated_costs, accumulated_costs, costs_block_dim, cudaMemcpyHostToDevice);
d_inplace_sum_views<<< grid, block >>>( d_accumulated_costs, d_dir_accumulated_costs, nx, ny, disp_range);
cudaMemcpy(accumulated_costs, d_accumulated_costs, costs_block_dim, cudaMemcpyDeviceToHost);
cudaMemcpy(dir_accumulated_costs, d_dir_accumulated_costs, costs_block_dim, cudaMemcpyDeviceToHost);
}
dirx=0;
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
cudaMemcpy(d_accumulated_costs, accumulated_costs, costs_block_dim, cudaMemcpyHostToDevice);
cudaMemcpy(d_dir_accumulated_costs, dir_accumulated_costs, costs_block_dim, cudaMemcpyHostToDevice);
d_inplace_sum_views <<< grid, block >>>( d_accumulated_costs, d_dir_accumulated_costs, nx, ny, disp_range);
cudaMemcpy(accumulated_costs, d_accumulated_costs, costs_block_dim, cudaMemcpyDeviceToHost);
cudaMemcpy(dir_accumulated_costs, d_dir_accumulated_costs, costs_block_dim, cudaMemcpyDeviceToHost);
}
cudaFree(d_leftIm);
cudaFree(d_rightIm);
free(costs);
free(dir_accumulated_costs);
cudaFree(d_dir_accumulated_costs);
cudaMemcpy(d_accumulated_costs, accumulated_costs, costs_block_dim, cudaMemcpyHostToDevice);
cudaMemcpy(d_dispImD, h_dispImD, image_size, cudaMemcpyHostToDevice );
dim3 block_im(block_x, block_y);
d_create_disparity_view <<< grid, block_im >>> ( d_accumulated_costs, d_dispImD, nx, ny, disp_range );
cudaMemcpy(h_dispImD, d_dispImD, image_size, cudaMemcpyDeviceToHost);
free(accumulated_costs);
cudaFree(d_accumulated_costs);
cudaFree(d_dispImD);
}
// print command line format
void usage(char *command)
{
printf("Usage: %s [-h] [-d device] [-l leftimage] [-r rightimage] [-o dev_dispimage] [-t host_dispimage] [-p disprange] \n",command);
}
// main
int main( int argc, char** argv)
{
// default command line options
int deviceId = 0;
int disp_range = 32;
char *leftIn =(char *)"lbull.pgm",
*rightIn =(char *)"rbull.pgm",
*fileOut =(char *)"d_dbull.pgm",
*referenceOut=(char *)"h_dbull.pgm";
// parse command line arguments
int opt;
while( (opt = getopt(argc,argv,"d:l:o:r:t:p:h")) !=-1)
{
switch(opt)
{
case 'd': // device
if(sscanf(optarg,"%d",&deviceId)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'l': // left image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
leftIn = strdup(optarg);
break;
case 'r': // right image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
rightIn = strdup(optarg);
break;
case 'o': // output image (from device) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
fileOut = strdup(optarg);
break;
case 't': // output image (from host) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
referenceOut = strdup(optarg);
break;
case 'p': // disp_range
if(sscanf(optarg,"%d",&disp_range)==0)
{
usage(argv[0]);
exit(1);
}
break;
case 'h': // help
usage(argv[0]);
exit(0);
break;
}
}
if(optind < argc) {
fprintf(stderr,"Error in arguments\n");
usage(argv[0]);
exit(1);
}
// select cuda device
cutilSafeCall( cudaSetDevice( deviceId ) );
// create events to measure host sgm time and device sgm time
cudaEvent_t startH, stopH, startD, stopD;
cudaEventCreate(&startH);
cudaEventCreate(&stopH);
cudaEventCreate(&startD);
cudaEventCreate(&stopD);
// allocate host memory
int* h_ldata=NULL;
int* h_rdata=NULL;
unsigned int h,w;
//load left pgm
if (cutLoadPGMi(leftIn, (unsigned int **)&h_ldata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", leftIn);
exit(1);
}
//load right pgm
if (cutLoadPGMi(rightIn, (unsigned int **)&h_rdata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", rightIn);
exit(1);
}
// allocate mem for the result on host side
int* h_odata = (int*) malloc( h*w*sizeof(int));
int* reference = (int*) malloc( h*w*sizeof(int));
// sgm at host
cudaEventRecord( startH, 0 );
sgmHost(h_ldata, h_rdata, reference, w, h, disp_range);
cudaEventRecord( stopH, 0 );
cudaEventSynchronize( stopH );
// sgm at GPU
cudaEventRecord( startD, 0 );
sgmDevice(h_ldata, h_rdata, h_odata, w, h, disp_range);
cudaEventRecord( stopD, 0 );
cudaEventSynchronize( stopD );
// check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed");
float timeH, timeD;
cudaEventElapsedTime( &timeH, startH, stopH );
printf( "Host processing time: %f (ms)\n", timeH);
cudaEventElapsedTime( &timeD, startD, stopD );
printf( "Device processing time: %f (ms)\n", timeD);
// save output images
if (cutSavePGMi(referenceOut, (unsigned int *)reference, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", referenceOut);
exit(1);
}
if (cutSavePGMi(fileOut,(unsigned int *) h_odata, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", fileOut);
exit(1);
}
// cleanup memory
cutFree( h_ldata);
cutFree( h_rdata);
free( h_odata);
free( reference);
cutilDeviceReset();
}
|
29691628204bbbb566f8d03ada5398f19ca1d7ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
int xpos = threadIdx.x + blockIdx.x*blockDim.x;
int ypos = threadIdx.y + blockIdx.y*blockDim.y;
if(xpos < numCols && ypos < numRows){
int pos = xpos * numRows + ypos;
uchar4 rgba = rgbaImage[pos];
float greyval = .299f * rgba.x +\
.587f * rgba.y +\
.114f * rgba.z;
greyImage[pos] = greyval;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
//first calculate number of total threads to run
size_t arrsize = numRows*numCols;
// try 16* 16 grid (256 threads in grid)
//const dim3 blockSize(32,32,1);
const dim3 blockSize(numCols,1,1);
size_t gridCols = (numCols + blockSize.x - 1) / blockSize.x;
size_t gridRows = (numRows + blockSize.y - 1) / blockSize.y;
//const dim3 gridSize(gridCols, gridRows, 1); //TODO
const dim3 gridSize(1, numRows, 1); //TODO
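// With these dims each block covers one image row (numCols threads along x)
// and the grid has numRows blocks along y, one thread per pixel; this
// assumes numCols <= 1024, the per-block thread limit.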
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 29691628204bbbb566f8d03ada5398f19ca1d7ca.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
int xpos = threadIdx.x + blockIdx.x*blockDim.x;
int ypos = threadIdx.y + blockIdx.y*blockDim.y;
if(xpos < numCols && ypos < numRows){
int pos = xpos * numRows + ypos;
uchar4 rgba = rgbaImage[pos];
float greyval = .299f * rgba.x +\
.587f * rgba.y +\
.114f * rgba.z;
greyImage[pos] = greyval;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
//first calculate number of total threads to run
size_t arrsize = numRows*numCols;
// try 16* 16 grid (256 threads in grid)
//const dim3 blockSize(32,32,1);
const dim3 blockSize(numCols,1,1);
size_t gridCols = (numCols + blockSize.x - 1) / blockSize.x;
size_t gridRows = (numRows + blockSize.y - 1) / blockSize.y;
//const dim3 gridSize(gridCols, gridRows, 1); //TODO
const dim3 gridSize(1, numRows, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
1574c18485e98ccb9d7a2f4f72c9a9abf49bd82d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <ctime>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h> // Stops underlining of __global__
#include <device_launch_parameters.h> // Stops underlining of threadIdx etc.
#include "FindClosestCPU.h"
#include "FindClosestGPU.h"
using namespace std;
int main()
{
// Number of points
const int count = 10000;
// Arrays of points
int *indexOfClosest = new int[count];
float3 *points = new float3[count];
float3* d_points; // GPU version
int* d_indexOfClosest;
// Create a list of random points
for(int i = 0; i < count; i++)
{
points[i].x = (float)((rand()%10000) - 5000);
points[i].y = (float)((rand()%10000) - 5000);
points[i].z = (float)((rand()%10000) - 5000);
}
hipMalloc(&d_points, sizeof(float3) * count);
hipMemcpy(d_points, points, sizeof(float3) * count, hipMemcpyHostToDevice);
hipMalloc(&d_indexOfClosest, sizeof(int) * count);
// This variable is used to keep track of the fastest time so far
long fastest = 1000000;
// Run the algorithm 20 times
for(int q = 0; q < 20; q++)
{
long startTime = clock();
// Run the algorithm
//FindClosestCPU(points, indexOfClosest, count);
hipLaunchKernelGGL(( FindClosestGPU), dim3((count / 320)+1), dim3(320), 0, 0, d_points, d_indexOfClosest, count);
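// Launch shape above (illustrative): with count = 10000 and 320 threads per
// block, (10000 / 320) + 1 = 32 blocks give 10240 threads, so every point
// gets a thread and the kernel needs to bounds-check the extra 240 threads
// against count.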
hipMemcpy(indexOfClosest, d_indexOfClosest, sizeof(int) * count, hipMemcpyDeviceToHost);
long finishTime = clock();
cout<<q<<" "<<(finishTime - startTime)<<endl;
// If that run was faster update the fastest time so far
if((finishTime - startTime) < fastest)
fastest = (finishTime - startTime);
}
// Print out the fastest time
cout<<"Fastest time: "<<fastest<<endl;
// Print the final results to screen
cout<<"Final results:"<<endl;
for(int i = 0; i < 10; i++)
cout<<i<<"."<<indexOfClosest[i]<<endl;
// Deallocate ram
delete[] indexOfClosest;
delete[] points;
hipFree(d_points);
hipFree(d_indexOfClosest);
hipDeviceReset();
return 0;
} | 1574c18485e98ccb9d7a2f4f72c9a9abf49bd82d.cu | #include <iostream>
#include <ctime>
#include <cuda.h>
#include <cuda_runtime.h> // Stops underlining of __global__
#include <device_launch_parameters.h> // Stops underlining of threadIdx etc.
#include "FindClosestCPU.h"
#include "FindClosestGPU.h"
using namespace std;
int main()
{
// Number of points
const int count = 10000;
// Arrays of points
int *indexOfClosest = new int[count];
float3 *points = new float3[count];
float3* d_points; // GPU version
int* d_indexOfClosest;
// Create a list of random points
for(int i = 0; i < count; i++)
{
points[i].x = (float)((rand()%10000) - 5000);
points[i].y = (float)((rand()%10000) - 5000);
points[i].z = (float)((rand()%10000) - 5000);
}
cudaMalloc(&d_points, sizeof(float3) * count);
cudaMemcpy(d_points, points, sizeof(float3) * count, cudaMemcpyHostToDevice);
cudaMalloc(&d_indexOfClosest, sizeof(int) * count);
// This variable is used to keep track of the fastest time so far
long fastest = 1000000;
// Run the algorithm 20 times
for(int q = 0; q < 20; q++)
{
long startTime = clock();
// Run the algorithm
//FindClosestCPU(points, indexOfClosest, count);
FindClosestGPU<<<(count / 320)+1, 320>>>(d_points, d_indexOfClosest, count);
cudaMemcpy(indexOfClosest, d_indexOfClosest, sizeof(int) * count, cudaMemcpyDeviceToHost);
long finishTime = clock();
cout<<q<<" "<<(finishTime - startTime)<<endl;
// If that run was faster update the fastest time so far
if((finishTime - startTime) < fastest)
fastest = (finishTime - startTime);
}
// Print out the fastest time
cout<<"Fastest time: "<<fastest<<endl;
// Print the final results to screen
cout<<"Final results:"<<endl;
for(int i = 0; i < 10; i++)
cout<<i<<"."<<indexOfClosest[i]<<endl;
// Deallocate ram
delete[] indexOfClosest;
delete[] points;
cudaFree(d_points);
cudaFree(d_indexOfClosest);
cudaDeviceReset();
return 0;
} |
de0d889bc5344e74eac84a91e6b81ddc8bc466af.hip | // !!! This is a file automatically generated by hipify!!!
/**************************************************************************
*
* set up GPU for processing
*
**************************************************************************/
#include "gpu_main.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
// #include <hip/hip_runtime.h>
// #define BackgroundRed 0.141f
// #define BackgroundGreen 0.212f
// #define BackgroundBlue 0.396f
#define BackgroundRed 0.0f
#define BackgroundGreen 0.0f
#define BackgroundBlue 0.0f
// #define AttractorRed 0.545f
// #define AttractorGreen 0.847f
// #define AttractorBlue 0.741f
#define AttractorRed 0.709f
// #define AttractorGreen 0.439f
// #define AttractorBlue 0.439f
#define AttractorGreen 0.0f
#define AttractorBlue 0.0
#define zInitialSize 3
#define zScale 1.1f
#define FadeSpeed 0.01f
#define HeatTransferSpeed 0.05f
texture<float, 2> texRed;
texture<float, 2> texGreen;
texture<float, 2> texBlue;
/******************************************************************************/
GPU_Palette initGPUPalette(unsigned int imageWidth, unsigned int imageHeight)
{
GPU_Palette X;
X.gThreads.x = 32; // 32 x 32 = 1024 threads per block
X.gThreads.y = 32;
X.gThreads.z = 1;
X.gBlocks.x = ceil(imageWidth / 32); // however many blocks needed for image
X.gBlocks.y = ceil(imageHeight / 32);
X.gBlocks.z = 1;
X.palette_width = imageWidth; // save this info
X.palette_height = imageHeight;
X.num_pixels = imageWidth * imageHeight;
// allocate memory on GPU corresponding to pixel colors:
hipError_t err;
err = hipMalloc((void**)&X.red, X.num_pixels * sizeof(float));
if (err != hipSuccess) {
printf("cuda error allocating red = %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipMalloc((void**)&X.green, X.num_pixels * sizeof(float)); // g
if (err != hipSuccess) {
printf("cuda error allocating green = %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipMalloc((void**)&X.blue, X.num_pixels * sizeof(float)); // b
if (err != hipSuccess) {
printf("cuda error allocating blue = %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
hipBindTexture2D(NULL, texRed, X.red, desc, imageWidth, imageHeight, sizeof(float) * imageWidth);
hipBindTexture2D(NULL, texGreen, X.red, desc, imageWidth, imageHeight, sizeof(float) * imageWidth);
hipBindTexture2D(NULL, texBlue, X.red, desc, imageWidth, imageHeight, sizeof(float) * imageWidth);
return X;
}
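// Illustrative launch shape: for a 1024 x 1024 window the code above yields
// 32 x 32 blocks of 32 x 32 threads (one thread per pixel), and each colour
// channel occupies 1024 * 1024 * sizeof(float) = 4 MiB of device memory.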
/******************************************************************************/
void freeGPUPalette(GPU_Palette* P)
{
hipUnbindTexture(texRed);
hipUnbindTexture(texGreen);
hipUnbindTexture(texBlue);
hipFree(P->red);
hipFree(P->green);
hipFree(P->blue);
}
/******************************************************************************/
int updatePalette(GPU_Palette* P, APoint (&points)[5])
// int updatePalette(GPU_Palette* P, int xIdx, int yIdx)
{
for (int i = 0; i < 5; i++) {
hipLaunchKernelGGL(( updateReds), dim3(P->gBlocks), dim3(P->gThreads), 0, 0, P->red, points[i].xIdx, points[i].yIdx, points[i].z);
hipLaunchKernelGGL(( updateGreens), dim3(P->gBlocks), dim3(P->gThreads), 0, 0, P->green, points[i].xIdx, points[i].yIdx, points[i].z);
hipLaunchKernelGGL(( updateBlues), dim3(P->gBlocks), dim3(P->gThreads), 0, 0, P->blue, points[i].xIdx, points[i].yIdx, points[i].z);
}
return 0;
}
/******************************************************************************/
__global__ void updateReds(float* red, int xIdx, int yIdx, float zIdx)
{
// float size = 5 + (zIdx * 0.1);
float size = zInitialSize + zIdx * zScale;
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int vecIdx = x + (y * blockDim.x * gridDim.x);
float top, left, center, right, bot;
top = tex2D(texRed, x, y + 1);
left = tex2D(texRed, x - 1, y);
center = tex2D(texRed, x, y);
right = tex2D(texRed, x + 1, y);
bot = tex2D(texRed, x, y - 1);
// red[vecIdx] =(top + bot + right + left + center) / 5.0;
if (sqrtf(powf((x - xIdx), 2) + powf((y - yIdx), 2)) < size) {
red[vecIdx] = AttractorRed;
} else {
float heat_average = (top + bot + right + left + center) / (5 - HeatTransferSpeed);
// if (heat_average > BackgroundRed) {
// red[vecIdx] += 0.001;
// }
if (heat_average >= AttractorRed) {
red[vecIdx] = AttractorRed / 2;
} else {
red[vecIdx] = heat_average;
}
red[vecIdx] -= FadeSpeed * red[vecIdx];
if (red[vecIdx] < BackgroundRed)
red[vecIdx] = BackgroundRed;
if (red[vecIdx] > AttractorRed)
red[vecIdx] = AttractorRed;
}
}
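// Numeric sketch of the update above (illustrative): a uniform neighbourhood
// at value v gives heat_average = 5v / 4.95, and the 1% fade then returns it
// to v (5/4.95 * 0.99 = 1), so flat regions hold steady while pixels hotter
// than their neighbours diffuse and decay toward the background colour.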
/******************************************************************************/
__global__ void updateGreens(float* green, int xIdx, int yIdx, float zIdx)
{
float size = zInitialSize + zIdx * zScale;
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int vecIdx = x + (y * blockDim.x * gridDim.x);
float top, left, center, right, bot;
top = tex2D(texRed, x, y + 1);
left = tex2D(texRed, x - 1, y);
center = tex2D(texRed, x, y);
right = tex2D(texRed, x + 1, y);
bot = tex2D(texRed, x, y - 1);
// green[vecIdx] = center + HeatTransfered * center);
// green[vecIdx] =(top + bot + right + left + center) / 5.0;
if (sqrtf(powf((x - xIdx), 2) + powf((y - yIdx), 2)) < size) {
green[vecIdx] = AttractorGreen;
} else {
float heat_average = (top + bot + right + left + center) / (5 - HeatTransferSpeed);
if (heat_average >= AttractorGreen) {
green[vecIdx] = AttractorGreen / 2;
} else {
green[vecIdx] = heat_average;
}
green[vecIdx] -= FadeSpeed * green[vecIdx];
if (green[vecIdx] < BackgroundGreen)
green[vecIdx] = BackgroundGreen;
if (green[vecIdx] > AttractorGreen)
green[vecIdx] = AttractorGreen;
}
}
/******************************************************************************/
__global__ void updateBlues(float* blue, int xIdx, int yIdx, float zIdx)
{
float size = zInitialSize + zIdx * zScale;
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int vecIdx = x + (y * blockDim.x * gridDim.x);
float top, left, center, right, bot;
top = tex2D(texRed, x, y + 1);
left = tex2D(texRed, x - 1, y);
center = tex2D(texRed, x, y);
right = tex2D(texRed, x + 1, y);
bot = tex2D(texRed, x, y - 1);
// blue[vecIdx] = center + FadeSpeed * (top + bot + right + left - 4 * center);
// blue[vecIdx] =(top + bot + right + left + center) / 5.0;
if (sqrtf(powf((x - xIdx), 2) + powf((y - yIdx), 2)) < size) {
blue[vecIdx] = AttractorBlue;
} else {
blue[vecIdx] -= FadeSpeed * blue[vecIdx];
if (blue[vecIdx] < BackgroundBlue)
blue[vecIdx] = BackgroundBlue;
// if (blue[vecIdx] > AttractorBlue)
// blue[vecIdx] = AttractorBlue;
}
}
/******************************************************************************/
| de0d889bc5344e74eac84a91e6b81ddc8bc466af.cu | /**************************************************************************
*
* set up GPU for processing
*
**************************************************************************/
#include "gpu_main.h"
#include <cuda.h>
#include <stdio.h>
// #include <cuda_runtime.h>
// #define BackgroundRed 0.141f
// #define BackgroundGreen 0.212f
// #define BackgroundBlue 0.396f
#define BackgroundRed 0.0f
#define BackgroundGreen 0.0f
#define BackgroundBlue 0.0f
// #define AttractorRed 0.545f
// #define AttractorGreen 0.847f
// #define AttractorBlue 0.741f
#define AttractorRed 0.709f
// #define AttractorGreen 0.439f
// #define AttractorBlue 0.439f
#define AttractorGreen 0.0f
#define AttractorBlue 0.0
#define zInitialSize 3
#define zScale 1.1f
#define FadeSpeed 0.01f
#define HeatTransferSpeed 0.05f
texture<float, 2> texRed;
texture<float, 2> texGreen;
texture<float, 2> texBlue;
/******************************************************************************/
GPU_Palette initGPUPalette(unsigned int imageWidth, unsigned int imageHeight)
{
GPU_Palette X;
X.gThreads.x = 32; // 32 x 32 = 1024 threads per block
X.gThreads.y = 32;
X.gThreads.z = 1;
X.gBlocks.x = ceil(imageWidth / 32); // however many blocks needed for image
X.gBlocks.y = ceil(imageHeight / 32);
X.gBlocks.z = 1;
X.palette_width = imageWidth; // save this info
X.palette_height = imageHeight;
X.num_pixels = imageWidth * imageHeight;
// allocate memory on GPU corresponding to pixel colors:
cudaError_t err;
err = cudaMalloc((void**)&X.red, X.num_pixels * sizeof(float));
if (err != cudaSuccess) {
printf("cuda error allocating red = %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaMalloc((void**)&X.green, X.num_pixels * sizeof(float)); // g
if (err != cudaSuccess) {
printf("cuda error allocating green = %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaMalloc((void**)&X.blue, X.num_pixels * sizeof(float)); // b
if (err != cudaSuccess) {
printf("cuda error allocating blue = %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cudaBindTexture2D(NULL, texRed, X.red, desc, imageWidth, imageHeight, sizeof(float) * imageWidth);
cudaBindTexture2D(NULL, texGreen, X.red, desc, imageWidth, imageHeight, sizeof(float) * imageWidth);
cudaBindTexture2D(NULL, texBlue, X.red, desc, imageWidth, imageHeight, sizeof(float) * imageWidth);
return X;
}
/******************************************************************************/
void freeGPUPalette(GPU_Palette* P)
{
cudaUnbindTexture(texRed);
cudaUnbindTexture(texGreen);
cudaUnbindTexture(texBlue);
cudaFree(P->red);
cudaFree(P->green);
cudaFree(P->blue);
}
/******************************************************************************/
int updatePalette(GPU_Palette* P, APoint (&points)[5])
// int updatePalette(GPU_Palette* P, int xIdx, int yIdx)
{
for (int i = 0; i < 5; i++) {
updateReds<<<P->gBlocks, P->gThreads>>>(P->red, points[i].xIdx, points[i].yIdx, points[i].z);
updateGreens<<<P->gBlocks, P->gThreads>>>(P->green, points[i].xIdx, points[i].yIdx, points[i].z);
updateBlues<<<P->gBlocks, P->gThreads>>>(P->blue, points[i].xIdx, points[i].yIdx, points[i].z);
}
return 0;
}
/******************************************************************************/
__global__ void updateReds(float* red, int xIdx, int yIdx, float zIdx)
{
// float size = 5 + (zIdx * 0.1);
float size = zInitialSize + zIdx * zScale;
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int vecIdx = x + (y * blockDim.x * gridDim.x);
float top, left, center, right, bot;
top = tex2D(texRed, x, y + 1);
left = tex2D(texRed, x - 1, y);
center = tex2D(texRed, x, y);
right = tex2D(texRed, x + 1, y);
bot = tex2D(texRed, x, y - 1);
// red[vecIdx] =(top + bot + right + left + center) / 5.0;
if (sqrtf(powf((x - xIdx), 2) + powf((y - yIdx), 2)) < size) {
red[vecIdx] = AttractorRed;
} else {
float heat_average = (top + bot + right + left + center) / (5 - HeatTransferSpeed);
// if (heat_average > BackgroundRed) {
// red[vecIdx] += 0.001;
// }
if (heat_average >= AttractorRed) {
red[vecIdx] = AttractorRed / 2;
} else {
red[vecIdx] = heat_average;
}
red[vecIdx] -= FadeSpeed * red[vecIdx];
if (red[vecIdx] < BackgroundRed)
red[vecIdx] = BackgroundRed;
if (red[vecIdx] > AttractorRed)
red[vecIdx] = AttractorRed;
}
}
/******************************************************************************/
__global__ void updateGreens(float* green, int xIdx, int yIdx, float zIdx)
{
float size = zInitialSize + zIdx * zScale;
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int vecIdx = x + (y * blockDim.x * gridDim.x);
float top, left, center, right, bot;
top = tex2D(texRed, x, y + 1);
left = tex2D(texRed, x - 1, y);
center = tex2D(texRed, x, y);
right = tex2D(texRed, x + 1, y);
bot = tex2D(texRed, x, y - 1);
// green[vecIdx] = center + HeatTransfered * center);
// green[vecIdx] =(top + bot + right + left + center) / 5.0;
if (sqrtf(powf((x - xIdx), 2) + powf((y - yIdx), 2)) < size) {
green[vecIdx] = AttractorGreen;
} else {
float heat_average = (top + bot + right + left + center) / (5 - HeatTransferSpeed);
if (heat_average >= AttractorGreen) {
green[vecIdx] = AttractorGreen / 2;
} else {
green[vecIdx] = heat_average;
}
green[vecIdx] -= FadeSpeed * green[vecIdx];
if (green[vecIdx] < BackgroundGreen)
green[vecIdx] = BackgroundGreen;
if (green[vecIdx] > AttractorGreen)
green[vecIdx] = AttractorGreen;
}
}
/******************************************************************************/
__global__ void updateBlues(float* blue, int xIdx, int yIdx, float zIdx)
{
float size = zInitialSize + zIdx * zScale;
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int vecIdx = x + (y * blockDim.x * gridDim.x);
float top, left, center, right, bot;
top = tex2D(texRed, x, y + 1);
left = tex2D(texRed, x - 1, y);
center = tex2D(texRed, x, y);
right = tex2D(texRed, x + 1, y);
bot = tex2D(texRed, x, y - 1);
// blue[vecIdx] = center + FadeSpeed * (top + bot + right + left - 4 * center);
// blue[vecIdx] =(top + bot + right + left + center) / 5.0;
if (sqrtf(powf((x - xIdx), 2) + powf((y - yIdx), 2)) < size) {
blue[vecIdx] = AttractorBlue;
} else {
blue[vecIdx] -= FadeSpeed * blue[vecIdx];
if (blue[vecIdx] < BackgroundBlue)
blue[vecIdx] = BackgroundBlue;
// if (blue[vecIdx] > AttractorBlue)
// blue[vecIdx] = AttractorBlue;
}
}
/******************************************************************************/
|
0ec1663c8bc55a89fab6049067164d7cbc6d370b.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/GpuIndexBinaryFlat.h>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/BinaryFlatIndex.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
namespace faiss { namespace gpu {
/// Default CPU search size for which we use paged copies
constexpr size_t kMinPageSize = (size_t) 256 * 1024 * 1024;
GpuIndexBinaryFlat::GpuIndexBinaryFlat(GpuResourcesProvider* provider,
const faiss::IndexBinaryFlat* index,
GpuIndexBinaryFlatConfig config)
: IndexBinary(index->d),
resources_(provider->getResources()),
binaryFlatConfig_(config) {
FAISS_THROW_IF_NOT_FMT(this->d % 8 == 0,
"vector dimension (number of bits) "
"must be divisible by 8 (passed %d)",
this->d);
// Flat index doesn't need training
this->is_trained = true;
copyFrom(index);
}
GpuIndexBinaryFlat::GpuIndexBinaryFlat(GpuResourcesProvider* provider,
int dims,
GpuIndexBinaryFlatConfig config)
: IndexBinary(dims),
resources_(provider->getResources()),
binaryFlatConfig_(std::move(config)) {
FAISS_THROW_IF_NOT_FMT(this->d % 8 == 0,
"vector dimension (number of bits) "
"must be divisible by 8 (passed %d)",
this->d);
// Flat index doesn't need training
this->is_trained = true;
// Construct index
DeviceScope scope(binaryFlatConfig_.device);
data_.reset(new BinaryFlatIndex(resources_.get(),
this->d, binaryFlatConfig_.memorySpace));
}
GpuIndexBinaryFlat::~GpuIndexBinaryFlat() {
}
int
GpuIndexBinaryFlat::getDevice() const {
return binaryFlatConfig_.device;
}
void
GpuIndexBinaryFlat::copyFrom(const faiss::IndexBinaryFlat* index) {
DeviceScope scope(binaryFlatConfig_.device);
this->d = index->d;
// GPU code has 32 bit indices
FAISS_THROW_IF_NOT_FMT(index->ntotal <=
(faiss::Index::idx_t) std::numeric_limits<int>::max(),
"GPU index only supports up to %zu indices; "
"attempting to copy CPU index with %zu parameters",
(size_t) std::numeric_limits<int>::max(),
(size_t) index->ntotal);
this->ntotal = index->ntotal;
// destroy old first before allocating new
data_.reset();
data_.reset(new BinaryFlatIndex(resources_.get(),
this->d, binaryFlatConfig_.memorySpace));
// The index could be empty
if (index->ntotal > 0) {
data_->add(index->xb.data(),
index->ntotal,
resources_->getDefaultStream(binaryFlatConfig_.device));
}
}
void
GpuIndexBinaryFlat::copyTo(faiss::IndexBinaryFlat* index) const {
DeviceScope scope(binaryFlatConfig_.device);
index->d = this->d;
index->ntotal = this->ntotal;
FAISS_ASSERT(data_);
FAISS_ASSERT(data_->getSize() == this->ntotal);
index->xb.resize(this->ntotal * (this->d / 8));
if (this->ntotal > 0) {
fromDevice(data_->getVectorsRef(),
index->xb.data(),
resources_->getDefaultStream(binaryFlatConfig_.device));
}
}
void
GpuIndexBinaryFlat::add(faiss::IndexBinary::idx_t n,
const uint8_t* x) {
DeviceScope scope(binaryFlatConfig_.device);
// To avoid multiple re-allocations, ensure we have enough storage
// available
data_->reserve(n, resources_->getDefaultStream(binaryFlatConfig_.device));
// Due to GPU indexing in int32, we can't store more than this
// number of vectors on a GPU
FAISS_THROW_IF_NOT_FMT(this->ntotal + n <=
(faiss::Index::idx_t) std::numeric_limits<int>::max(),
"GPU index only supports up to %zu indices",
(size_t) std::numeric_limits<int>::max());
data_->add((const unsigned char*) x,
n,
resources_->getDefaultStream(binaryFlatConfig_.device));
this->ntotal += n;
}
void
GpuIndexBinaryFlat::reset() {
DeviceScope scope(binaryFlatConfig_.device);
// Free the underlying memory
data_->reset();
this->ntotal = 0;
}
void
GpuIndexBinaryFlat::search(faiss::IndexBinary::idx_t n,
const uint8_t* x,
faiss::IndexBinary::idx_t k,
int32_t* distances,
faiss::IndexBinary::idx_t* labels) const {
if (n == 0) {
return;
}
// For now, only support <= max int results
FAISS_THROW_IF_NOT_FMT(n <= (Index::idx_t) std::numeric_limits<int>::max(),
"GPU index only supports up to %zu indices",
(size_t) std::numeric_limits<int>::max());
FAISS_THROW_IF_NOT_FMT(k <= (Index::idx_t) getMaxKSelection(),
"GPU only supports k <= %d (requested %d)",
getMaxKSelection(),
(int) k); // select limitation
DeviceScope scope(binaryFlatConfig_.device);
auto stream = resources_->getDefaultStream(binaryFlatConfig_.device);
// The input vectors may be too large for the GPU, but we still
// assume that the output distances and labels are not.
// Go ahead and make space for output distances and labels on the
// GPU.
// If we reach a point where all inputs are too big, we can add
// another level of tiling.
auto outDistances = toDeviceTemporary<int32_t, 2>(resources_.get(),
binaryFlatConfig_.device,
distances,
stream,
{(int) n, (int) k});
// FlatIndex only supports an interface returning int indices
DeviceTensor<int, 2, true> outIntIndices(
resources_.get(), makeTempAlloc(AllocType::Other, stream),
{(int) n, (int) k});
bool usePaged = false;
if (getDeviceForAddress(x) == -1) {
// It is possible that the user is querying for a vector set size
// `x` that won't fit on the GPU.
// In this case, we will have to handle paging of the data from CPU
// -> GPU.
// Currently, we don't handle the case where the output data won't
// fit on the GPU (e.g., n * k is too large for the GPU memory).
size_t dataSize = (size_t) n * (this->d / 8) * sizeof(uint8_t);
if (dataSize >= kMinPageSize) {
searchFromCpuPaged_(n, x, k,
outDistances.data(),
outIntIndices.data());
usePaged = true;
}
}
if (!usePaged) {
searchNonPaged_(n, x, k,
outDistances.data(),
outIntIndices.data());
}
// Convert and copy int indices out
auto outIndices =
toDeviceTemporary<faiss::Index::idx_t, 2>(resources_.get(),
binaryFlatConfig_.device,
labels,
stream,
{(int) n, (int) k});
// Convert int to long
convertTensor<int, faiss::Index::idx_t, 2>(stream,
outIntIndices,
outIndices);
// Copy back if necessary
fromDevice<int32_t, 2>(outDistances, distances, stream);
fromDevice<faiss::Index::idx_t, 2>(outIndices, labels, stream);
}
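// Single-shot search path: the query block is small enough to move to the
// device in one piece (or already lives there), so the raw output pointers
// are wrapped in Tensors and the whole batch is handed to
// BinaryFlatIndex::query.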
void
GpuIndexBinaryFlat::searchNonPaged_(int n,
const uint8_t* x,
int k,
int32_t* outDistancesData,
int* outIndicesData) const {
Tensor<int32_t, 2, true> outDistances(outDistancesData, {n, k});
Tensor<int, 2, true> outIndices(outIndicesData, {n, k});
auto stream = resources_->getDefaultStream(binaryFlatConfig_.device);
// Make sure arguments are on the device we desire; use temporary
// memory allocations to move it if necessary
auto vecs = toDeviceTemporary<uint8_t, 2>(resources_.get(),
binaryFlatConfig_.device,
const_cast<uint8_t*>(x),
stream,
{n, (int) (this->d / 8)});
data_->query(vecs, k, outDistances, outIndices);
}
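// Paged search path for large CPU-resident query sets. The batch size is
// chosen so each page is roughly kMinPageSize bytes of query data: e.g.
// with d = 256 bits (32 bytes per vector) and a 256 MiB page that works out
// to about 8M queries per batch (illustrative numbers, not from the source).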
void
GpuIndexBinaryFlat::searchFromCpuPaged_(int n,
const uint8_t* x,
int k,
int32_t* outDistancesData,
int* outIndicesData) const {
Tensor<int32_t, 2, true> outDistances(outDistancesData, {n, k});
Tensor<int, 2, true> outIndices(outIndicesData, {n, k});
auto vectorSize = sizeof(uint8_t) * (this->d / 8);
// Just page without overlapping copy with compute (as GpuIndexFlat does)
int batchSize = utils::nextHighestPowerOf2(
(int) ((size_t) kMinPageSize / vectorSize));
for (int cur = 0; cur < n; cur += batchSize) {
int num = ::min(batchSize, n - cur);
auto outDistancesSlice = outDistances.narrowOutermost(cur, num);
auto outIndicesSlice = outIndices.narrowOutermost(cur, num);
searchNonPaged_(num,
x + (size_t) cur * (this->d / 8),
k,
outDistancesSlice.data(),
outIndicesSlice.data());
}
}
void
GpuIndexBinaryFlat::reconstruct(faiss::IndexBinary::idx_t key,
uint8_t* out) const {
DeviceScope scope(binaryFlatConfig_.device);
FAISS_THROW_IF_NOT_MSG(key < this->ntotal, "index out of bounds");
auto stream = resources_->getDefaultStream(binaryFlatConfig_.device);
auto& vecs = data_->getVectorsRef();
auto vec = vecs[key];
fromDevice(vec.data(), out, vecs.getSize(1), stream);
}
} } // namespace gpu
| 0ec1663c8bc55a89fab6049067164d7cbc6d370b.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/GpuIndexBinaryFlat.h>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/BinaryFlatIndex.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
namespace faiss { namespace gpu {
/// Default CPU search size for which we use paged copies
constexpr size_t kMinPageSize = (size_t) 256 * 1024 * 1024;
GpuIndexBinaryFlat::GpuIndexBinaryFlat(GpuResourcesProvider* provider,
const faiss::IndexBinaryFlat* index,
GpuIndexBinaryFlatConfig config)
: IndexBinary(index->d),
resources_(provider->getResources()),
binaryFlatConfig_(config) {
FAISS_THROW_IF_NOT_FMT(this->d % 8 == 0,
"vector dimension (number of bits) "
"must be divisible by 8 (passed %d)",
this->d);
// Flat index doesn't need training
this->is_trained = true;
copyFrom(index);
}
GpuIndexBinaryFlat::GpuIndexBinaryFlat(GpuResourcesProvider* provider,
int dims,
GpuIndexBinaryFlatConfig config)
: IndexBinary(dims),
resources_(provider->getResources()),
binaryFlatConfig_(std::move(config)) {
FAISS_THROW_IF_NOT_FMT(this->d % 8 == 0,
"vector dimension (number of bits) "
"must be divisible by 8 (passed %d)",
this->d);
// Flat index doesn't need training
this->is_trained = true;
// Construct index
DeviceScope scope(binaryFlatConfig_.device);
data_.reset(new BinaryFlatIndex(resources_.get(),
this->d, binaryFlatConfig_.memorySpace));
}
GpuIndexBinaryFlat::~GpuIndexBinaryFlat() {
}
int
GpuIndexBinaryFlat::getDevice() const {
return binaryFlatConfig_.device;
}
void
GpuIndexBinaryFlat::copyFrom(const faiss::IndexBinaryFlat* index) {
DeviceScope scope(binaryFlatConfig_.device);
this->d = index->d;
// GPU code has 32 bit indices
FAISS_THROW_IF_NOT_FMT(index->ntotal <=
(faiss::Index::idx_t) std::numeric_limits<int>::max(),
"GPU index only supports up to %zu indices; "
"attempting to copy CPU index with %zu parameters",
(size_t) std::numeric_limits<int>::max(),
(size_t) index->ntotal);
this->ntotal = index->ntotal;
// destroy old first before allocating new
data_.reset();
data_.reset(new BinaryFlatIndex(resources_.get(),
this->d, binaryFlatConfig_.memorySpace));
// The index could be empty
if (index->ntotal > 0) {
data_->add(index->xb.data(),
index->ntotal,
resources_->getDefaultStream(binaryFlatConfig_.device));
}
}
void
GpuIndexBinaryFlat::copyTo(faiss::IndexBinaryFlat* index) const {
DeviceScope scope(binaryFlatConfig_.device);
index->d = this->d;
index->ntotal = this->ntotal;
FAISS_ASSERT(data_);
FAISS_ASSERT(data_->getSize() == this->ntotal);
index->xb.resize(this->ntotal * (this->d / 8));
if (this->ntotal > 0) {
fromDevice(data_->getVectorsRef(),
index->xb.data(),
resources_->getDefaultStream(binaryFlatConfig_.device));
}
}
void
GpuIndexBinaryFlat::add(faiss::IndexBinary::idx_t n,
const uint8_t* x) {
DeviceScope scope(binaryFlatConfig_.device);
// To avoid multiple re-allocations, ensure we have enough storage
// available
data_->reserve(n, resources_->getDefaultStream(binaryFlatConfig_.device));
// Due to GPU indexing in int32, we can't store more than this
// number of vectors on a GPU
FAISS_THROW_IF_NOT_FMT(this->ntotal + n <=
(faiss::Index::idx_t) std::numeric_limits<int>::max(),
"GPU index only supports up to %zu indices",
(size_t) std::numeric_limits<int>::max());
data_->add((const unsigned char*) x,
n,
resources_->getDefaultStream(binaryFlatConfig_.device));
this->ntotal += n;
}
void
GpuIndexBinaryFlat::reset() {
DeviceScope scope(binaryFlatConfig_.device);
// Free the underlying memory
data_->reset();
this->ntotal = 0;
}
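// Performs an exact brute-force search over the stored binary vectors.
// The flow below: validate n and k against the GPU int32 / k-selection
// limits, stage output buffers on the configured device, run either a
// paged or non-paged scan depending on query size, then convert the
// int32 result indices back to faiss::Index::idx_t for the caller.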
void
GpuIndexBinaryFlat::search(faiss::IndexBinary::idx_t n,
const uint8_t* x,
faiss::IndexBinary::idx_t k,
int32_t* distances,
faiss::IndexBinary::idx_t* labels) const {
if (n == 0) {
return;
}
// For now, only support <= max int results
FAISS_THROW_IF_NOT_FMT(n <= (Index::idx_t) std::numeric_limits<int>::max(),
"GPU index only supports up to %zu indices",
(size_t) std::numeric_limits<int>::max());
FAISS_THROW_IF_NOT_FMT(k <= (Index::idx_t) getMaxKSelection(),
"GPU only supports k <= %d (requested %d)",
getMaxKSelection(),
(int) k); // select limitation
DeviceScope scope(binaryFlatConfig_.device);
auto stream = resources_->getDefaultStream(binaryFlatConfig_.device);
// The input vectors may be too large for the GPU, but we still
// assume that the output distances and labels are not.
// Go ahead and make space for output distances and labels on the
// GPU.
// If we reach a point where all inputs are too big, we can add
// another level of tiling.
auto outDistances = toDeviceTemporary<int32_t, 2>(resources_.get(),
binaryFlatConfig_.device,
distances,
stream,
{(int) n, (int) k});
// FlatIndex only supports an interface returning int indices
DeviceTensor<int, 2, true> outIntIndices(
resources_.get(), makeTempAlloc(AllocType::Other, stream),
{(int) n, (int) k});
bool usePaged = false;
if (getDeviceForAddress(x) == -1) {
// It is possible that the user is querying for a vector set size
// `x` that won't fit on the GPU.
// In this case, we will have to handle paging of the data from CPU
// -> GPU.
// Currently, we don't handle the case where the output data won't
// fit on the GPU (e.g., n * k is too large for the GPU memory).
size_t dataSize = (size_t) n * (this->d / 8) * sizeof(uint8_t);
if (dataSize >= kMinPageSize) {
searchFromCpuPaged_(n, x, k,
outDistances.data(),
outIntIndices.data());
usePaged = true;
}
}
if (!usePaged) {
searchNonPaged_(n, x, k,
outDistances.data(),
outIntIndices.data());
}
// Convert and copy int indices out
auto outIndices =
toDeviceTemporary<faiss::Index::idx_t, 2>(resources_.get(),
binaryFlatConfig_.device,
labels,
stream,
{(int) n, (int) k});
// Convert int to long
convertTensor<int, faiss::Index::idx_t, 2>(stream,
outIntIndices,
outIndices);
// Copy back if necessary
fromDevice<int32_t, 2>(outDistances, distances, stream);
fromDevice<faiss::Index::idx_t, 2>(outIndices, labels, stream);
}
void
GpuIndexBinaryFlat::searchNonPaged_(int n,
const uint8_t* x,
int k,
int32_t* outDistancesData,
int* outIndicesData) const {
Tensor<int32_t, 2, true> outDistances(outDistancesData, {n, k});
Tensor<int, 2, true> outIndices(outIndicesData, {n, k});
auto stream = resources_->getDefaultStream(binaryFlatConfig_.device);
// Make sure arguments are on the device we desire; use temporary
// memory allocations to move it if necessary
auto vecs = toDeviceTemporary<uint8_t, 2>(resources_.get(),
binaryFlatConfig_.device,
const_cast<uint8_t*>(x),
stream,
{n, (int) (this->d / 8)});
data_->query(vecs, k, outDistances, outIndices);
}
void
GpuIndexBinaryFlat::searchFromCpuPaged_(int n,
const uint8_t* x,
int k,
int32_t* outDistancesData,
int* outIndicesData) const {
Tensor<int32_t, 2, true> outDistances(outDistancesData, {n, k});
Tensor<int, 2, true> outIndices(outIndicesData, {n, k});
auto vectorSize = sizeof(uint8_t) * (this->d / 8);
// Just page without overlapping copy with compute (as GpuIndexFlat does)
int batchSize = utils::nextHighestPowerOf2(
(int) ((size_t) kMinPageSize / vectorSize));
for (int cur = 0; cur < n; cur += batchSize) {
int num = std::min(batchSize, n - cur);
auto outDistancesSlice = outDistances.narrowOutermost(cur, num);
auto outIndicesSlice = outIndices.narrowOutermost(cur, num);
searchNonPaged_(num,
x + (size_t) cur * (this->d / 8),
k,
outDistancesSlice.data(),
outIndicesSlice.data());
}
}
void
GpuIndexBinaryFlat::reconstruct(faiss::IndexBinary::idx_t key,
uint8_t* out) const {
DeviceScope scope(binaryFlatConfig_.device);
FAISS_THROW_IF_NOT_MSG(key < this->ntotal, "index out of bounds");
auto stream = resources_->getDefaultStream(binaryFlatConfig_.device);
auto& vecs = data_->getVectorsRef();
auto vec = vecs[key];
fromDevice(vec.data(), out, vecs.getSize(1), stream);
}
} } // namespace gpu
|
a5cbc072f3b3731c6faae69c802faa9c2e708e8b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifndef _PRESCAN_CU_
#define _PRESCAN_CU_
// includes, kernels
#include "scanLargeArray_kernel.cu"
#include <assert.h>
#include <stdio.h>
#include "cutil.h"
inline bool
isPowerOfTwo(int n)
{
return ((n&(n-1))==0) ;
}
inline int
floorPow2(int n)
{
#ifdef WIN32
// method 2
return 1 << (int)logb((float)n);
#else
// method 1
// float nf = (float)n;
// return 1 << (((*(int*)&nf) >> 23) - 127);
int exp;
frexp((float)n, &exp);
return 1 << (exp - 1);
#endif
}
#define BLOCK_SIZE 256
int myMax(int x, int y){
return (x > y) ? x : y;
}
static unsigned int** g_scanBlockSums;
static unsigned int g_numEltsAllocated = 0;
static unsigned int g_numLevelsAllocated = 0;
static void preallocBlockSums(unsigned int maxNumElements)
{
    assert(g_numEltsAllocated == 0); // shouldn't be called twice without an intervening deallocBlockSums()
g_numEltsAllocated = maxNumElements;
unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
unsigned int numElts = maxNumElements;
int level = 0;
do {
unsigned int numBlocks = myMax(1, (int)ceil((float)numElts / (2.f * blockSize)));
if (numBlocks > 1) level++;
numElts = numBlocks;
} while (numElts > 1);
g_scanBlockSums = (unsigned int**) malloc(level * sizeof(unsigned int*));
g_numLevelsAllocated = level;
numElts = maxNumElements;
level = 0;
do {
unsigned int numBlocks = myMax(1, (int)ceil((float)numElts / (2.f * blockSize)));
if (numBlocks > 1)
CUDA_SAFE_CALL(hipMalloc((void**) &g_scanBlockSums[level++], numBlocks * sizeof(unsigned int)));
numElts = numBlocks;
} while (numElts > 1);
CUT_CHECK_ERROR("preallocBlockSums");
}
static void deallocBlockSums()
{
for (unsigned int i = 0; i < g_numLevelsAllocated; i++)
{
hipFree(g_scanBlockSums[i]);
}
CUT_CHECK_ERROR("deallocBlockSums");
free((void**)g_scanBlockSums);
g_scanBlockSums = 0;
g_numEltsAllocated = 0;
g_numLevelsAllocated = 0;
}
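// Work-efficient recursive scan (Blelloch-style): each thread block scans a
// tile of 2*BLOCK_SIZE elements and writes its tile total into
// g_scanBlockSums[level]; those totals are then scanned recursively, and
// uniformAdd folds the scanned totals back into every tile's results.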
static void prescanArrayRecursive(unsigned int *outArray,
const unsigned int *inArray,
int numElements,
int level)
{
unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
unsigned int numBlocks =
myMax(1, (int)ceil((float)numElements / (2.f * blockSize)));
unsigned int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = floorPow2(numElements);
unsigned int numEltsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// compute the smallest power of 2 able to compute its scan.
unsigned int numEltsLastBlock =
numElements - (numBlocks-1) * numEltsPerBlock;
unsigned int numThreadsLastBlock = myMax(1, numEltsLastBlock / 2);
unsigned int np2LastBlock = 0;
unsigned int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock)
{
np2LastBlock = 1;
if(!isPowerOfTwo(numEltsLastBlock))
numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock =
sizeof(unsigned int) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
unsigned int extraSpace = numEltsPerBlock / NUM_BANKS;
unsigned int sharedMemSize =
sizeof(unsigned int) * (numEltsPerBlock + extraSpace);
#ifdef DEBUG
if (numBlocks > 1)
{
assert(g_numEltsAllocated >= numElements);
}
#endif
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(myMax(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
// make sure there are no CUDA errors before we start
CUT_CHECK_ERROR("prescanArrayRecursive before kernels");
// execute the scan
if (numBlocks > 1)
{
hipLaunchKernelGGL(( prescan<true, false>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray,
inArray,
g_scanBlockSums[level],
numThreads * 2, 0, 0);
CUT_CHECK_ERROR("prescanWithBlockSums");
if (np2LastBlock)
{
hipLaunchKernelGGL(( prescan<true, true>), dim3(1), dim3(numThreadsLastBlock), sharedMemLastBlock , 0,
outArray, inArray, g_scanBlockSums[level], numEltsLastBlock,
numBlocks - 1, numElements - numEltsLastBlock);
CUT_CHECK_ERROR("prescanNP2WithBlockSums");
}
// After scanning all the sub-blocks, we are mostly done. But now we
// need to take all of the last values of the sub-blocks and scan those.
        // This will give us a new value that must be added to each block to
// get the final results.
// recursive (CPU) call
prescanArrayRecursive(g_scanBlockSums[level],
g_scanBlockSums[level],
numBlocks,
level+1);
hipLaunchKernelGGL(( uniformAdd), dim3(grid), dim3(threads) , 0, 0, outArray,
g_scanBlockSums[level],
numElements - numEltsLastBlock,
0, 0);
CUT_CHECK_ERROR("uniformAdd");
if (np2LastBlock)
{
hipLaunchKernelGGL(( uniformAdd), dim3(1), dim3(numThreadsLastBlock) , 0, 0, outArray,
g_scanBlockSums[level],
numEltsLastBlock,
numBlocks - 1,
numElements - numEltsLastBlock);
CUT_CHECK_ERROR("uniformAdd");
}
}
else if (isPowerOfTwo(numElements))
{
hipLaunchKernelGGL(( prescan<false, false>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray,
0, numThreads * 2, 0, 0);
CUT_CHECK_ERROR("prescan");
}
else
{
hipLaunchKernelGGL(( prescan<false, true>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray,
0, numElements, 0, 0);
CUT_CHECK_ERROR("prescanNP2");
}
}
static void prescanArray(unsigned int *outArray, unsigned int *inArray, int numElements)
{
prescanArrayRecursive(outArray, inArray, numElements, 0);
}
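// Illustrative usage sketch (not part of the original sample):
//   preallocBlockSums(n);              // reserve per-level block-sum buffers
//   prescanArray(d_out, d_in, n);      // exclusive prefix sum of d_in into d_out
//   deallocBlockSums();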
#endif // _PRESCAN_CU_
| a5cbc072f3b3731c6faae69c802faa9c2e708e8b.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifndef _PRESCAN_CU_
#define _PRESCAN_CU_
// includes, kernels
#include "scanLargeArray_kernel.cu"
#include <assert.h>
#include <stdio.h>
#include "cutil.h"
inline bool
isPowerOfTwo(int n)
{
return ((n&(n-1))==0) ;
}
inline int
floorPow2(int n)
{
#ifdef WIN32
// method 2
return 1 << (int)logb((float)n);
#else
// method 1
// float nf = (float)n;
// return 1 << (((*(int*)&nf) >> 23) - 127);
int exp;
frexp((float)n, &exp);
return 1 << (exp - 1);
#endif
}
#define BLOCK_SIZE 256
int myMax(int x, int y){
return (x > y) ? x : y;
}
static unsigned int** g_scanBlockSums;
static unsigned int g_numEltsAllocated = 0;
static unsigned int g_numLevelsAllocated = 0;
static void preallocBlockSums(unsigned int maxNumElements)
{
    assert(g_numEltsAllocated == 0); // shouldn't be called twice without an intervening deallocBlockSums()
g_numEltsAllocated = maxNumElements;
unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
unsigned int numElts = maxNumElements;
int level = 0;
do {
unsigned int numBlocks = myMax(1, (int)ceil((float)numElts / (2.f * blockSize)));
if (numBlocks > 1) level++;
numElts = numBlocks;
} while (numElts > 1);
g_scanBlockSums = (unsigned int**) malloc(level * sizeof(unsigned int*));
g_numLevelsAllocated = level;
numElts = maxNumElements;
level = 0;
do {
unsigned int numBlocks = myMax(1, (int)ceil((float)numElts / (2.f * blockSize)));
if (numBlocks > 1)
CUDA_SAFE_CALL(cudaMalloc((void**) &g_scanBlockSums[level++], numBlocks * sizeof(unsigned int)));
numElts = numBlocks;
} while (numElts > 1);
CUT_CHECK_ERROR("preallocBlockSums");
}
static void deallocBlockSums()
{
for (unsigned int i = 0; i < g_numLevelsAllocated; i++)
{
cudaFree(g_scanBlockSums[i]);
}
CUT_CHECK_ERROR("deallocBlockSums");
free((void**)g_scanBlockSums);
g_scanBlockSums = 0;
g_numEltsAllocated = 0;
g_numLevelsAllocated = 0;
}
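// Work-efficient recursive scan (Blelloch-style): each thread block scans a
// tile of 2*BLOCK_SIZE elements and writes its tile total into
// g_scanBlockSums[level]; those totals are then scanned recursively, and
// uniformAdd folds the scanned totals back into every tile's results.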
static void prescanArrayRecursive(unsigned int *outArray,
const unsigned int *inArray,
int numElements,
int level)
{
unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
unsigned int numBlocks =
myMax(1, (int)ceil((float)numElements / (2.f * blockSize)));
unsigned int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = floorPow2(numElements);
unsigned int numEltsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// compute the smallest power of 2 able to compute its scan.
unsigned int numEltsLastBlock =
numElements - (numBlocks-1) * numEltsPerBlock;
unsigned int numThreadsLastBlock = myMax(1, numEltsLastBlock / 2);
unsigned int np2LastBlock = 0;
unsigned int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock)
{
np2LastBlock = 1;
if(!isPowerOfTwo(numEltsLastBlock))
numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock =
sizeof(unsigned int) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
unsigned int extraSpace = numEltsPerBlock / NUM_BANKS;
unsigned int sharedMemSize =
sizeof(unsigned int) * (numEltsPerBlock + extraSpace);
#ifdef DEBUG
if (numBlocks > 1)
{
assert(g_numEltsAllocated >= numElements);
}
#endif
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(myMax(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
// make sure there are no CUDA errors before we start
CUT_CHECK_ERROR("prescanArrayRecursive before kernels");
// execute the scan
if (numBlocks > 1)
{
prescan<true, false><<< grid, threads, sharedMemSize >>>(outArray,
inArray,
g_scanBlockSums[level],
numThreads * 2, 0, 0);
CUT_CHECK_ERROR("prescanWithBlockSums");
if (np2LastBlock)
{
prescan<true, true><<< 1, numThreadsLastBlock, sharedMemLastBlock >>>
(outArray, inArray, g_scanBlockSums[level], numEltsLastBlock,
numBlocks - 1, numElements - numEltsLastBlock);
CUT_CHECK_ERROR("prescanNP2WithBlockSums");
}
// After scanning all the sub-blocks, we are mostly done. But now we
// need to take all of the last values of the sub-blocks and scan those.
        // This will give us a new value that must be added to each block to
// get the final results.
// recursive (CPU) call
prescanArrayRecursive(g_scanBlockSums[level],
g_scanBlockSums[level],
numBlocks,
level+1);
uniformAdd<<< grid, threads >>>(outArray,
g_scanBlockSums[level],
numElements - numEltsLastBlock,
0, 0);
CUT_CHECK_ERROR("uniformAdd");
if (np2LastBlock)
{
uniformAdd<<< 1, numThreadsLastBlock >>>(outArray,
g_scanBlockSums[level],
numEltsLastBlock,
numBlocks - 1,
numElements - numEltsLastBlock);
CUT_CHECK_ERROR("uniformAdd");
}
}
else if (isPowerOfTwo(numElements))
{
prescan<false, false><<< grid, threads, sharedMemSize >>>(outArray, inArray,
0, numThreads * 2, 0, 0);
CUT_CHECK_ERROR("prescan");
}
else
{
prescan<false, true><<< grid, threads, sharedMemSize >>>(outArray, inArray,
0, numElements, 0, 0);
CUT_CHECK_ERROR("prescanNP2");
}
}
static void prescanArray(unsigned int *outArray, unsigned int *inArray, int numElements)
{
prescanArrayRecursive(outArray, inArray, numElements, 0);
}
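// Illustrative usage sketch (not part of the original sample):
//   preallocBlockSums(n);              // reserve per-level block-sum buffers
//   prescanArray(d_out, d_in, n);      // exclusive prefix sum of d_in into d_out
//   deallocBlockSums();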
#endif // _PRESCAN_CU_
|
840119adf64833e2b68ab6e5bd70376981dc4269.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of l o g e ( 1 + x ) .
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
__global__ void vec_normcdfinvf (size_t n, float *result, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = normcdfinvf(y[id]);
}
} | 840119adf64833e2b68ab6e5bd70376981dc4269.cu | #include "includes.h"
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of l o g e ( 1 + x ) .
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
__global__ void vec_normcdfinvf (size_t n, float *result, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = normcdfinvf(y[id]);
}
} |
0634d7c71616d82668c411d4430c436f37208f7c.hip | // !!! This is a file automatically generated by hipify!!!
/*
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <algorithm>
#include <functional>
#include <numeric>
#include <ctime>
#include <time.h>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#include "sorts.hip"
#include "filter.h"
#include "callbacks.h"
#include "zone_map.h"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#define fseek(S, S1, S2) _fseeki64(S, S1, S2)
#include <windows.h>
#else
#include <unistd.h>
#endif
using namespace std;
using namespace thrust::placeholders;
size_t total_count = 0, total_max;
clock_t tot;
unsigned int total_segments = 0, old_segments;
size_t process_count;
size_t alloced_sz = 0;
bool fact_file_loaded = 1;
bool verbose;
bool interactive, ssd, delta, star;
unsigned int prs;
void* d_v = nullptr;
void* s_v = nullptr;
queue<string> op_sort;
queue<string> op_presort;
queue<string> op_type;
bool op_case = 0;
string grp_val;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<unsigned int> op_nums_precision;
queue<string> col_aliases;
map<string, map<string, col_data> > data_dict;
unordered_map<string, unordered_map<unsigned long long int, size_t> > char_hash;
map<string, char*> index_buffers;
map<string, unsigned long long int*> idx_vals;
map<string, char*> buffers;
map<string, size_t> buffer_sizes;
size_t total_buffer_size;
queue<string> buffer_names;
void* alloced_tmp;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string, unsigned int> cpy_bits;
map<string, long long int> cpy_init_val;
char* readbuff = nullptr;
thrust::device_vector<unsigned int> rcol_matches;
thrust::device_vector<int_type> rcol_dev;
struct f_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON) || ((x-y) < -EPSILON);
}
};
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
template <typename T>
struct power_functor : public thrust::unary_function<T,T>
{
unsigned int a;
__host__ __device__
power_functor(unsigned int a_) {
a = a_;
}
__host__ __device__
T operator()(T x)
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
struct is_zero
{
__host__ __device__
bool operator()(const int &x)
{
return x == 0;
}
};
int get_utc_offset() {
time_t zero = 24*60*60L;
struct tm * timeptr;
int gmtime_hours;
/* get the local time for Jan 2, 1900 00:00 UTC */
timeptr = localtime( &zero );
gmtime_hours = timeptr->tm_hour;
/* if the local time is the "day before" the UTC, subtract 24 hours
from the hours to get the UTC offset */
if( timeptr->tm_mday < 2 )
gmtime_hours -= 24;
return gmtime_hours;
}
/*
the utc analogue of mktime,
(much like timegm on some systems)
*/
time_t tm_to_time_t_utc( struct tm * timeptr ) {
/* gets the epoch time relative to the local time zone,
and then adds the appropriate number of seconds to make it UTC */
return mktime( timeptr ) + get_utc_offset() * 3600;
}
/*class power_functor {
unsigned int a;
public:
power_functor(unsigned int a_) { a = a_; }
__host__ __device__ int_type operator()(int_type x) const
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
*/
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void write_compressed_char(string file_name, unsigned int index, size_t mCount);
size_t getFreeMem();
size_t getTotalSystemMemory();
void process_error(int severity, string err);
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
source = 1;
text_source = 1;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name, unsigned int max)
: mColumnCount(0), mRecCount(0)
{
maxRecs = max;
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
source = 1;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(const size_t RecordCount, const unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(queue<string> op_sel, const queue<string> op_sel_as)
{
initialize(op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b, op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::~CudaSet()
{
free();
};
void CudaSet::allocColumnOnDevice(string colname, size_t RecordCount)
{
if (type[colname] != 1 ) {
d_columns_int[colname].resize(RecordCount);
}
else
d_columns_float[colname].resize(RecordCount);
};
void CudaSet::resize_join(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else
h_columns_float[columnNames[i]].resize(mRecCount);
};
};
void CudaSet::resize(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else {
h_columns_float[columnNames[i]].resize(mRecCount);
}
};
};
void CudaSet::deAllocColumnOnDevice(string colname)
{
if (type[colname] != 1 && !d_columns_int.empty() && d_columns_int.find(colname) != d_columns_int.end()) {
if(d_columns_int[colname].size() > 0) {
d_columns_int[colname].resize(0);
d_columns_int[colname].shrink_to_fit();
};
}
else
if (type[colname] == 1 && !d_columns_float.empty()) {
if (d_columns_float[colname].size() > 0) {
d_columns_float[colname].resize(0);
d_columns_float[colname].shrink_to_fit();
};
};
};
void CudaSet::allocOnDevice(size_t RecordCount)
{
for(unsigned int i=0; i < columnNames.size(); i++)
allocColumnOnDevice(columnNames[i], RecordCount);
};
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i < columnNames.size(); i++) {
deAllocColumnOnDevice(columnNames[i]);
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
for (auto it=d_columns_int.begin(); it != d_columns_int.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
for (auto it=d_columns_float.begin(); it != d_columns_float.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
if(filtered) { // dealloc the source
if(varNames.find(source_name) != varNames.end()) {
varNames[source_name]->deAllocOnDevice();
};
};
};
void CudaSet::resizeDeviceColumn(size_t RecCount, string colname)
{
if (type[colname] != 1) {
d_columns_int[colname].resize(RecCount);
}
else
d_columns_float[colname].resize(RecCount);
};
void CudaSet::resizeDevice(size_t RecCount)
{
for(unsigned int i=0; i < columnNames.size(); i++) {
resizeDeviceColumn(RecCount, columnNames[i]);
};
};
bool CudaSet::onDevice(string colname)
{
if (type[colname] != 1) {
if (!d_columns_int.empty() && d_columns_int[colname].size())
return 1;
}
else
if (!d_columns_float.empty() && d_columns_float[colname].size())
return 1;
return 0;
}
CudaSet* CudaSet::copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->not_compressed = not_compressed;
a->segCount = segCount;
a->maxRecs = maxRecs;
a->columnNames = columnNames;
a->ts_cols = ts_cols;
a->cols = cols;
a->type = type;
a->char_size = char_size;
a->decimal = decimal;
a->decimal_zeroes = decimal_zeroes;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(a->type[columnNames[i]] == 0) {
a->d_columns_int[columnNames[i]] = thrust::device_vector<int_type>();
a->h_columns_int[columnNames[i]] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >();
}
else
if(a->type[columnNames[i]] == 1) {
a->d_columns_float[columnNames[i]] = thrust::device_vector<float_type>();
a->h_columns_float[columnNames[i]] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >();
}
else {
a->h_columns_char[columnNames[i]] = nullptr;
a->d_columns_char[columnNames[i]] = nullptr;
};
};
a->load_file_name = load_file_name;
a->mRecCount = 0;
return a;
}
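// Reads selected rows of one compressed column segment straight from disk.
// The segment header holds the byte count and the per-segment lower bound
// ("lower_val"), with the packed bit width stored after it. Rows listed in
// prm_vh are then fetched in 4096-byte pages, so a filtered read only
// touches the pages that actually contain wanted rows.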
int_type CudaSet::readSsdSegmentsFromFile(unsigned int segNum, string colname, size_t offset, thrust::host_vector<unsigned int>& prm_vh, CudaSet* dest)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
unsigned int cnt, bits;
int_type lower_val;
unsigned short int val_s_r[4096/2];
char val_c_r[4096];
unsigned int val_i_r[4096/4];
unsigned long long int val_l_r[4096/8];
unsigned int idx;
bool idx_set = 0;
fread(&cnt, 4, 1, f);
fread(&lower_val, 8, 1, f);
fseek(f, cnt - (8+4) + 32, SEEK_CUR);
fread(&bits, 4, 1, f);
//cout << "lower_val bits " << lower_val << " " << bits << endl;
if(type[colname] == 0) {
//cout << "lower_val bits " << lower_val << " " << bits << endl;
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
if(bits == 8) {
fread(&val_c_r[0], 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_c_r[0];
}
else
if(bits == 16) {
fread(&val_s_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_s_r[0];
}
if(bits == 32) {
fread(&val_i_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_i_r[0];
}
            if(bits == 64) {
fread(&val_l_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_l_r[0];
}
}
else {
if(bits == 8) {
dest->h_columns_int[colname][i + offset] = val_c_r[prm_vh[i]-idx];
}
else
if(bits == 16) {
dest->h_columns_int[colname][i + offset] = val_s_r[prm_vh[i]-idx];
}
if(bits == 32) {
dest->h_columns_int[colname][i + offset] = val_i_r[prm_vh[i]-idx];
}
            if(bits == 64) {
dest->h_columns_int[colname][i + offset] = val_l_r[prm_vh[i]-idx];
}
};
};
}
else
if(type[colname] == 1) {
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
fread(val_c_r, 4096, 1, f);
memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[0], bits/8);
}
else {
memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[(prm_vh[i]-idx)*(bits/8)], bits/8);
};
};
}
else {
//no strings in fact tables
};
fclose(f);
return lower_val;
}
int_type CudaSet::readSsdSegmentsFromFileR(unsigned int segNum, string colname, thrust::host_vector<unsigned int>& prm_vh, thrust::host_vector<unsigned int>& dest)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
unsigned int cnt, bits;
int_type lower_val;
fread(&cnt, 4, 1, f);
fread(&lower_val, 8, 1, f);
fseek(f, cnt - (8+4) + 32, SEEK_CUR);
fread(&bits, 4, 1, f);
unsigned short int val_s_r[4096/2];
char val_c_r[4096];
unsigned int val_i_r[4096/4];
unsigned long long int val_l_r[4096/8];
unsigned int idx;
bool idx_set = 0;
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
if(bits == 8) {
fread(val_c_r, 4096, 1, f);
dest[i] = val_c_r[0];
}
else
if(bits == 16) {
fread(val_s_r, 4096, 1, f);
dest[i] = val_s_r[0];
}
if(bits == 32) {
fread(val_i_r, 4096, 1, f);
dest[i] = val_i_r[0];
}
            if(bits == 64) {
fread(val_l_r, 4096, 1, f);
dest[i] = val_l_r[0];
}
}
else {
if(bits == 8) {
dest[i] = val_c_r[prm_vh[i]-idx];
}
else
if(bits == 16) {
dest[i] = val_s_r[prm_vh[i]-idx];
}
if(bits == 32) {
dest[i] = val_i_r[prm_vh[i]-idx];
}
            if(bits == 64) {
dest[i] = val_l_r[prm_vh[i]-idx];
}
};
};
fclose(f);
return lower_val;
}
std::clock_t tot_disk;
void CudaSet::readSegmentsFromFile(unsigned int segNum, string colname)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
if(type[colname] == 2)
f1 = f1 + ".idx";
std::clock_t start1 = std::clock();
if(interactive) { //check if data are in buffers
if(buffers.find(f1) == buffers.end()) { // add data to buffers
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
process_error(3, "Error opening " + string(f1) +" file " );
};
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
while(total_buffer_size + fileSize > getTotalSystemMemory() && !buffer_names.empty()) { //free some buffers
//delete [] buffers[buffer_names.front()];
hipHostFree(buffers[buffer_names.front()]);
total_buffer_size = total_buffer_size - buffer_sizes[buffer_names.front()];
buffer_sizes.erase(buffer_names.front());
buffers.erase(buffer_names.front());
buffer_names.pop();
};
fseek(f, 0, SEEK_SET);
char* buff;
hipHostMalloc((void**) &buff, fileSize,hipHostMallocDefault);
fread(buff, fileSize, 1, f);
fclose(f);
buffers[f1] = buff;
buffer_sizes[f1] = fileSize;
            buffer_names.push(f1);
            total_buffer_size = total_buffer_size + fileSize;
cout << "added buffer " << f1 << " " << fileSize << endl;
};
// get data from buffers
if(type[colname] != 1) {
unsigned int cnt = ((unsigned int*)buffers[f1])[0];
if(cnt > h_columns_int[colname].size()/8 + 10)
h_columns_int[colname].resize(cnt/8 + 10);
}
else {
unsigned int cnt = ((unsigned int*)buffers[f1])[0];
if(cnt > h_columns_float[colname].size()/8 + 10)
h_columns_float[colname].resize(cnt/8 + 10);
}
}
else {
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
if(type[colname] != 1) {
if(1 > h_columns_int[colname].size())
h_columns_int[colname].resize(1);
fread(h_columns_int[colname].data(), 4, 1, f);
unsigned int cnt = ((unsigned int*)(h_columns_int[colname].data()))[0];
if(cnt/8+10 > h_columns_int[colname].size()) {
h_columns_int[colname].resize(cnt + 10);
};
size_t rr = fread((unsigned int*)(h_columns_int[colname].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
char buf[1024];
sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
process_error(3, string(buf));
};
}
else {
if(1 > h_columns_float[colname].size())
h_columns_float[colname].resize(1);
fread(h_columns_float[colname].data(), 4, 1, f);
unsigned int cnt = ((unsigned int*)(h_columns_float[colname].data()))[0];
if(cnt/8+10 > h_columns_float[colname].size())
h_columns_float[colname].resize(cnt + 10);
size_t rr = fread((unsigned int*)(h_columns_float[colname].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
char buf[1024];
sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
process_error(3, string(buf));
};
}
fclose(f);
};
tot_disk = tot_disk + (std::clock() - start1);
};
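// Moves one column segment to the GPU. Uncompressed columns are copied
// directly from the host vectors; compressed columns are first staged via
// readSegmentsFromFile (or the in-memory buffer cache) and then expanded on
// the device with pfor_decompress, converting decimal columns to float on
// the fly unless we are in the copy-only phase (phase_copy).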
void CudaSet::CopyColumnToGpu(string colname, unsigned int segment, size_t offset)
{
if(not_compressed) {
// calculate how many records we need to copy
if(segment < segCount-1) {
mRecCount = maxRecs;
}
else {
mRecCount = hostRecCount - maxRecs*(segCount-1);
};
if(type[colname] != 1) {
if(!alloced_switch) {
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_columns_int[colname].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
else {
if(!alloced_switch) {
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_columns_float[colname].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
}
else {
readSegmentsFromFile(segment,colname);
if(!d_v)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
string f1;
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(segment) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(segment);
};
if(type[colname] != 1) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), buffers[f1], d_v, s_v, colname);
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
};
}
else {
if(decimal[colname]) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + offset));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin(), long_to_float());
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
};
//for(int i = 0; i < mRecCount;i++)
//cout << "DECOMP " << (float_type)(d_col_int[i]) << " " << d_col_float[i] << endl;
};
}
//else // uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
}
};
}
void CudaSet::CopyColumnToGpu(string colname) // copy all segments
{
if(not_compressed) {
if(type[colname] != 1)
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mRecCount, d_columns_int[colname].begin());
else
thrust::copy(h_columns_float[colname].begin(), h_columns_float[colname].begin() + mRecCount, d_columns_float[colname].begin());
}
else {
if(!d_v)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
size_t cnt = 0;
string f1;
for(unsigned int i = 0; i < segCount; i++) {
readSegmentsFromFile(i,colname);
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(i) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(i);
};
if(type[colname] == 0) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), buffers[f1], d_v, s_v, colname);
};
}
else
if(type[colname] == 1) {
if(decimal[colname]) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin() + cnt, long_to_float());
};
}
// else uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
};
cnt = cnt + mRecCount;
//totalRecs = totals + mRecCount;
};
mRecCount = cnt;
};
}
void CudaSet::CopyColumnToHost(string colname, size_t offset, size_t RecCount)
{
if(type[colname] != 1) {
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin() + RecCount, h_columns_int[colname].begin() + offset);
}
else
thrust::copy(d_columns_float[colname].begin(), d_columns_float[colname].begin() + RecCount, h_columns_float[colname].begin() + offset);
}
void CudaSet::CopyColumnToHost(string colname)
{
CopyColumnToHost(colname, 0, mRecCount);
}
void CudaSet::CopyToHost(size_t offset, size_t count)
{
for(unsigned int i = 0; i < columnNames.size(); i++) {
CopyColumnToHost(columnNames[i], offset, count);
};
}
float_type* CudaSet::get_float_type_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_float[name].data());
}
int_type* CudaSet::get_int_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_int[name].data());
}
float_type* CudaSet::get_host_float_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_float[name].data());
}
int_type* CudaSet::get_host_int_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_int[name].data());
}
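// GroupBy: assumes the records are already sorted on the grouping columns. Flags
// every position where any grouping column changes between adjacent records, then
// stores the starting offset of each group in grp (grp[0] = 0).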
void CudaSet::GroupBy(stack<string> columnRef)
{
thrust::device_vector<bool> grp_dev(mRecCount);
thrust::fill(grp_dev.begin(), grp_dev.end(), 0);
if(scratch.size() < mRecCount)
scratch.resize(mRecCount*sizeof(bool));
thrust::device_ptr<bool> d_group((bool*)thrust::raw_pointer_cast(scratch.data()));
d_group[mRecCount-1] = 0;
for(int i = 0; i < columnRef.size(); columnRef.pop()) {
unsigned int bits;
if(cpy_bits.empty())
bits = 0;
else
bits = cpy_bits[columnRef.top()];
if(bits == 8) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned char>());
}
else
if(bits == 16) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned short int>());
}
else
if(bits == 32) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned int>());
}
else {
thrust::transform(d_columns_int[columnRef.top()].begin(), d_columns_int[columnRef.top()].begin() + mRecCount - 1,
d_columns_int[columnRef.top()].begin()+1, d_group, thrust::not_equal_to<int_type>());
};
thrust::transform(d_group, d_group+mRecCount, grp_dev.begin(), grp_dev.begin(), thrust::logical_or<bool>());
};
grp_count = thrust::count(grp_dev.begin(), grp_dev.end(), 1) + 1;
//cout << "grp count " << grp_count << endl;
grp.resize(grp_count);
if(grp_count > 1)
thrust::copy_if(thrust::make_counting_iterator((unsigned int)1), thrust::make_counting_iterator((unsigned int)grp_dev.size()),
grp_dev.begin(), grp.begin()+1, thrust::identity<bool>());
grp[0] = 0;
};
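// addDeviceColumn: registers (or resizes) a column with the given name and copies
// recCount values from the supplied device pointer into the set; the integer
// overload also mirrors the data to the host vector.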
void CudaSet::addDeviceColumn(int_type* col, string colname, size_t recCount)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 0;
d_columns_int[colname] = thrust::device_vector<int_type>(recCount);
h_columns_int[colname] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >(recCount);
}
else { // already exists, may need to resize it
if(d_columns_int[colname].size() < recCount) {
d_columns_int[colname].resize(recCount);
};
if(h_columns_int[colname].size() < recCount) {
h_columns_int[colname].resize(recCount);
};
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[colname].begin());
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin()+recCount, h_columns_int[colname].begin());
};
void CudaSet::addDeviceColumn(float_type* col, string colname, size_t recCount, bool is_decimal)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 1;
d_columns_float[colname] = thrust::device_vector<float_type>(recCount);
h_columns_float[colname] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >(recCount);
}
else { // already exists, may need to resize it
if(d_columns_float[colname].size() < recCount)
d_columns_float[colname].resize(recCount);
if(h_columns_float[colname].size() < recCount)
h_columns_float[colname].resize(recCount);
};
decimal[colname] = is_decimal;
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[colname].begin());
};
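// gpu_perm: builds a row permutation that sorts the set by the columns in sf
// (ascending). Integer and float keys are reordered on the GPU; char keys are
// handled on the host via update_permutation_char_host.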
void CudaSet::gpu_perm(queue<string> sf, thrust::device_vector<unsigned int>& permutation) {
permutation.resize(mRecCount);
thrust::sequence(permutation.begin(), permutation.begin() + mRecCount,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation.data());
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, mRecCount*8));
string sort_type = "ASC";
while(!sf.empty()) {
if (type[sf.front()] == 0) {
update_permutation(d_columns_int[sf.front()], raw_ptr, mRecCount, sort_type, (int_type*)temp, 64);
}
else
if (type[sf.front()] == 1) {
update_permutation(d_columns_float[sf.front()], raw_ptr, mRecCount, sort_type, (float_type*)temp, 64);
}
else {
thrust::host_vector<unsigned int> permutation_h = permutation;
char* temp1 = new char[char_size[sf.front()]*mRecCount];
update_permutation_char_host(h_columns_char[sf.front()], permutation_h.data(), mRecCount, sort_type, temp1, char_size[sf.front()]);
delete [] temp1;
permutation = permutation_h;
};
sf.pop();
};
hipFree(temp);
}
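// compress: writes mCount records of every column to disk as new compressed
// segment(s), optionally sorting on op_sort first and splitting the data into
// partition_count partitions, then refreshes the per-column .header files.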
void CudaSet::compress(string file_name, size_t offset, unsigned int check_type, unsigned int check_val, size_t mCount, const bool append)
{
string str(file_name);
thrust::device_vector<unsigned int> permutation;
long long int oldCount;
bool int_check = 0;
void* d;
CUDA_SAFE_CALL(hipMalloc((void **) &d, mCount*float_size));
total_count = total_count + mCount;
if (mCount > total_max && op_sort.empty()) {
total_max = mCount;
};
if(!total_segments && append) {
string s= file_name + "." + columnNames[0] + ".header";
ifstream binary_file(s.c_str(),ios::binary);
if(binary_file) {
binary_file.read((char *)&oldCount, 8);
binary_file.read((char *)&total_segments, 4);
binary_file.read((char *)&maxRecs, 4);
if(total_max < maxRecs)
total_max = maxRecs;
binary_file.close();
total_count = oldCount + mCount;
};
};
if(!op_sort.empty()) { //sort the segment
gpu_perm(op_sort, permutation);
};
// here we need to check for partitions and if partition_count > 0 -> create partitions
if(mCount < partition_count || partition_count == 0)
partition_count = 1;
unsigned int partition_recs = mCount/partition_count;
if(!op_sort.empty()) {
if(total_max < partition_recs)
total_max = partition_recs;
};
total_segments++;
old_segments = total_segments;
size_t new_offset;
for(unsigned int i = 0; i < columnNames.size(); i++) {
std::clock_t start1 = std::clock();
string colname = columnNames[i];
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
new_offset = 0;
if(type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_int[colname].begin(), d_col);
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1) {
pfor_compress( (int_type*)d + new_offset, partition_recs*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*int_size, str, h_columns_int[colname], 0);
};
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
if(!int_check) {
thrust::copy(h_columns_int[colname].begin() + offset, h_columns_int[colname].begin() + offset + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( thrust::raw_pointer_cast(d_columns_int[colname].data()), mCount*int_size, str, h_columns_int[colname], 0);
};
};
}
else
if(type[colname] == 1) {
if(decimal[colname]) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
pfor_compress( (int_type*)d + new_offset, partition_recs*float_size, str, h_columns_float[colname], 1);
else
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*float_size, str, h_columns_float[colname], 1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
thrust::copy(h_columns_float[colname].begin() + offset, h_columns_float[colname].begin() + offset + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[colname], 1);
};
}
else { // do not compress -- float
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col+mRecCount, h_columns_float[colname].begin());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
unsigned int curr_cnt;
if (p < partition_count - 1)
curr_cnt = partition_recs;
else
curr_cnt = mCount - partition_recs*p;
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&curr_cnt, 4);
binary_file.write((char *)(h_columns_float[colname].data() + new_offset),curr_cnt*float_size);
new_offset = new_offset + partition_recs;
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[colname].data() + offset),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
};
}
else { //char
//populate char_hash
if(append && char_hash[colname].size() == 0) {
string s= file_name + "." + colname;
ifstream binary_file(s.c_str(), ios::binary | ios::ate);
if(binary_file) {
auto sz = binary_file.tellg();
binary_file.seekg(0, binary_file.beg);
char* strings = new char[sz];
binary_file.read(strings, sz);
binary_file.close();
//unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
for (unsigned int z = 0 ; z < sz/char_size[colname]; z++) {
char_hash[colname][MurmurHash64A(&strings[z*char_size[colname]], char_size[colname], hash_seed)/2] = z;
};
delete [] strings;
};
};
if(!op_sort.empty()) {
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[colname]*mRecCount];
apply_permutation_char_host(h_columns_char[colname], h_permutation, mRecCount, t, char_size[colname]);
delete [] h_permutation;
thrust::copy(t, t+ char_size[colname]*mRecCount, h_columns_char[colname]);
delete [] t;
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
compress_char(str, colname, partition_recs, new_offset, total_segments-1);
else
compress_char(str, colname, mCount - partition_recs*p, new_offset, total_segments-1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
compress_char(str, colname, mCount, offset, total_segments-1);
};
};
if((check_type == 1 && fact_file_loaded) || (check_type == 1 && check_val == 0)) {
if(!op_sort.empty())
writeHeader(file_name, colname, total_segments-1);
else {
writeHeader(file_name, colname, total_segments);
};
};
total_segments = old_segments;
};
hipFree(d);
if(!op_sort.empty()) {
total_segments = (old_segments-1)+partition_count;
};
permutation.resize(0);
permutation.shrink_to_fit();
}
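// writeHeader: rewrites the column's .header file with the total record count,
// the number of segments, the largest segment size and the cnt_counts statistic.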
void CudaSet::writeHeader(string file_name, string colname, unsigned int tot_segs) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&total_max, 4);
binary_file.write((char *)&cnt_counts[ff], 4);
//cout << "HEADER1 " << total_count << " " << tot_segs << " " << total_max << endl;
binary_file.close();
};
void CudaSet::reWriteHeader(string file_name, string colname, unsigned int tot_segs, size_t newRecs, size_t maxRecs1) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&newRecs, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&maxRecs1, 4);
//cout << "HEADER2 " << newRecs << endl;
binary_file.close();
};
void CudaSet::writeSortHeader(string file_name)
{
string str(file_name);
unsigned int idx;
if(!op_sort.empty()) {
str += ".sort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_sort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_sort);
while(!os.empty()) {
if(verbose)
cout << "sorted on " << os.front() << endl;
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".sort";
remove(str.c_str());
};
str = file_name;
if(!op_presort.empty()) {
str += ".presort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_presort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_presort);
while(!os.empty()) {
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".presort";
remove(str.c_str());
};
}
using namespace mgpu;
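// Display: prints up to 'limit' records through the row_cb callback. In-memory
// uncompressed data is emitted directly; otherwise columns are decompressed
// segment by segment. Dictionary-encoded string columns are resolved by seeking
// into their backing files.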
void CudaSet::Display(unsigned int limit, bool binary, bool term)
{
#define MAXCOLS 128
#define MAXFIELDSIZE 1400
//-- This should/will be converted to an array holding pointers to malloc'ed, properly sized structures --
char bigbuf[MAXCOLS * MAXFIELDSIZE];
memset(bigbuf, 0, MAXCOLS * MAXFIELDSIZE);
char *fields[MAXCOLS];
const char *dcolumns[MAXCOLS];
size_t mCount; // num records in play
bool print_all = 0;
string ss, str;
int rows = 0;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
cout << "mRecCount=" << mRecCount << " mcount = " << mCount << " term " << term << " limit=" << limit << " print_all=" << print_all << endl;
unsigned int cc =0;
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
for(unsigned int i = 0; i < columnNames.size(); i++)
{
fields[cc] = &(bigbuf[cc*MAXFIELDSIZE]); // a hack to avoid malloc overheads - refine later
dcolumns[cc++] = columnNames[i].c_str();
if(string_map.find(columnNames[i]) != string_map.end()) {
auto s = string_map[columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
FILE *f;
f = fopen(string_map[columnNames[i]].c_str(), "rb");
file_map[string_map[columnNames[i]]] = f;
len_map[string_map[columnNames[i]]] = len;
};
};
// The goal here is to loop fast and avoid any double handling of outgoing data - pointers are good.
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) { // for each record
for(unsigned int j=0; j < columnNames.size(); j++) { // for each col
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
sprintf(fields[j], "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]])
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
//fprintf(file_pr, "%s", buffer);
//fprintf(file_pr, ".%d", rem);
sprintf(fields[j], "%s.%d", buffer,rem);
/*time_t tt = h_columns_int[columnNames[j]][i];
auto ti = localtime(&tt);
char buffer[10];
strftime(buffer,80,"%Y-%m-%d", ti);
sprintf(fields[j], "%s", buffer);
*/
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] = '\0'; // zero-terminate the string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char **)dcolumns);
rows++;
};
}
else {
queue<string> op_vx;
for(unsigned int i = 0; i < columnNames.size(); i++)
op_vx.push(columnNames[i]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) { // if host arrays are empty
copyColumns(this, op_vx, curr_seg, cnt);
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
if(sum_printed + mRecCount <= mCount || print_all)
curr_count = mRecCount;
else
curr_count = mCount - sum_printed;
}
else
curr_count = mCount;
sum_printed = sum_printed + mRecCount;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end())
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] = '\0'; // zero-terminate the string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char**)dcolumns);
rows++;
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
}; // end else
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
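// Store: with binary == 0 writes the records as a 'sep'-delimited text file (or to
// stdout when term is set); with binary == 1 updates the data dictionary and writes
// compressed binary segments via compress().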
void CudaSet::Store(const string file_name, const char* sep, const unsigned int limit, const bool binary, const bool append, const bool term)
{
if (mRecCount == 0 && binary == 1 && !term) { // write tails
for(unsigned int j=0; j < columnNames.size(); j++) {
writeHeader(file_name, columnNames[j], total_segments);
};
return;
};
size_t mCount;
bool print_all = 0;
string str;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
if(binary == 0) {
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
string bf;
unsigned int max_len = 0;
for(unsigned int j=0; j < columnNames.size(); j++) {
if(string_map.find(columnNames[j]) != string_map.end()) {
auto s = string_map[columnNames[j]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if(len > max_len)
max_len = len;
FILE *f;
f = fopen(string_map[columnNames[j]].c_str(), "rb");
file_map[string_map[columnNames[j]]] = f;
len_map[string_map[columnNames[j]]] = len;
};
};
bf.reserve(max_len);
FILE *file_pr;
if(!term) {
file_pr = fopen(file_name.c_str(), "w");
if (!file_pr)
cout << "Could not open file " << file_name << endl;
}
else
file_pr = stdout;
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1 ) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
}
};
if (i != mCount -1 )
fputs("\n",file_pr);
};
if(!term)
fclose(file_pr);
}
else {
queue<string> op_vx;
string ss;
for(unsigned int j=0; j < columnNames.size(); j++)
op_vx.push(columnNames[j]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
mRecCount = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
if(curr_seg == 0) {
if(limit != 0 && limit < mRecCount) {
mCount = limit;
print_all = 0;
}
else {
mCount = mRecCount;
print_all = 1;
};
};
// if host arrays are empty
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
//cout << "start " << sum_printed << " " << mRecCount << " " << mCount << endl;
if(sum_printed + mRecCount <= mCount || print_all) {
curr_count = mRecCount;
}
else {
curr_count = mCount - sum_printed;
};
}
else {
curr_count = mCount;
};
sum_printed = sum_printed + mRecCount;
//cout << "sum printed " << sum_printed << " " << curr_count << " " << curr_seg << endl;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
};
};
if (i != mCount -1 && (curr_seg != segCount || i < curr_count))
fputs("\n",file_pr);
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
if(!term) {
fclose(file_pr);
};
};
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
else {
//lets update the data dictionary
for(unsigned int j=0; j < columnNames.size(); j++) {
data_dict[file_name][columnNames[j]].col_type = type[columnNames[j]];
if(type[columnNames[j]] != 2) {
if(decimal[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = decimal_zeroes[columnNames[j]];
else
if (ts_cols[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = UINT_MAX;
else
data_dict[file_name][columnNames[j]].col_length = 0;
}
else
data_dict[file_name][columnNames[j]].col_length = char_size[columnNames[j]];
};
save_dict = 1;
if(text_source) { //writing a binary file using a text file as a source
compress(file_name, 0, 1, 0, mCount, append);
for(unsigned int i = 0; i< columnNames.size(); i++)
if(type[columnNames[i]] == 2)
deAllocColumnOnDevice(columnNames[i]);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
size_t offset = 0;
if(!not_compressed) { // records are compressed, for example after filter op.
//decompress to host
queue<string> op_vx;
for(unsigned int i = 0; i< columnNames.size(); i++) {
op_vx.push(columnNames[i]);
};
allocColumns(this, op_vx);
size_t oldCnt = mRecCount;
mRecCount = 0;
resize(oldCnt);
mRecCount = oldCnt;
for(unsigned int i = 0; i < segCount; i++) {
size_t cnt = 0;
copyColumns(this, op_vx, i, cnt);
CopyToHost(0, mRecCount);
offset = offset + mRecCount;
compress(file_name, 0, 0, i - (segCount-1), mRecCount, append);
};
}
else {
// now we have decompressed records on the host
//call setSegments and compress columns in every segment
segCount = (mRecCount/process_count + 1);
offset = 0;
for(unsigned int z = 0; z < segCount; z++) {
if(z < segCount-1) {
if(mRecCount < process_count) {
mCount = mRecCount;
}
else {
mCount = process_count;
}
}
else {
mCount = mRecCount - (segCount-1)*process_count;
};
compress(file_name, offset, 0, z - (segCount-1), mCount, append);
offset = offset + mCount;
};
};
};
};
}
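// compress_char: dictionary-encodes one segment of a fixed-length char column.
// Previously unseen strings are appended to the shared dictionary file, the
// per-record dictionary indices are pfor-compressed into <col>.<segment>.idx and
// the 64-bit string hashes are written to <col>.<segment>.hash.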
void CudaSet::compress_char(const string file_name, const string colname, const size_t mCount, const size_t offset, const unsigned int segment)
{
unsigned int len = char_size[colname];
string h_name, i_name, file_no_seg = file_name.substr(0, file_name.find_last_of("."));
i_name = file_no_seg + "." + to_string(segment) + ".idx";
h_name = file_no_seg + "." + to_string(segment) + ".hash";
fstream b_file_str, loc_hashes;
fstream binary_file_h(h_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file_h.write((char *)&mCount, 4);
if(segment == 0) {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::trunc);
}
else {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::app);
};
if(h_columns_int.find(colname) == h_columns_int.end()) {
h_columns_int[colname] = thrust::host_vector<int_type >(mCount);
}
else {
if(h_columns_int[colname].size() < mCount)
h_columns_int[colname].resize(mCount);
};
if(d_columns_int.find(colname) == d_columns_int.end()) {
d_columns_int[colname] = thrust::device_vector<int_type >(mCount);
}
else {
if(d_columns_int[colname].size() < mCount)
d_columns_int[colname].resize(mCount);
};
size_t cnt;
long long int* hash_array = new long long int[mCount];
unordered_map<unsigned long long int, size_t>::iterator iter;
vector<int_type> test(mCount);
for (unsigned int i = 0 ; i < mCount; i++) {
hash_array[i] = MurmurHash64A(h_columns_char[colname] + (i+offset)*len, len, hash_seed)/2;
iter = char_hash[colname].find(hash_array[i]);
if(iter == char_hash[colname].end()) {
cnt = char_hash[colname].size();
char_hash[colname][hash_array[i]] = cnt;
b_file_str.write((char *)h_columns_char[colname] + (i+offset)*len, len);
//h_columns_int[colname][i] = cnt;
test[i] = cnt;
}
else {
//h_columns_int[colname][i] = iter->second;
test[i] = iter->second;
};
};
memcpy(h_columns_int[colname].data(), test.data(), mCount*8);
binary_file_h.write((char *)hash_array, 8*mCount);
delete [] hash_array;
thrust::device_vector<int_type> d_col(mCount);
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mCount, d_col.begin());
pfor_compress(thrust::raw_pointer_cast(d_col.data()), mCount*int_size, i_name, h_columns_int[colname], 0);
binary_file_h.close();
b_file_str.close();
};
bool first_time = 1;
size_t rec_sz = 0;
size_t process_piece;
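// LoadBigFile: reads the delimited text file in pieces sized to fit into free GPU
// memory, parses the fields on the GPU (parse_functor plus the gpu_* converters)
// and appends the parsed values to the host/device columns. Returns true once the
// whole file has been consumed.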
bool CudaSet::LoadBigFile(FILE* file_p, thrust::device_vector<char>& d_readbuff, thrust::device_vector<char*>& dest,
thrust::device_vector<unsigned int>& ind, thrust::device_vector<unsigned int>& dest_len)
{
const char* sep = separator.c_str();
unsigned int maxx = cols.rbegin()->first;
map<unsigned int, string>::iterator it;
bool done = 0;
std::clock_t start1 = std::clock();
vector<int> types;
vector<int> cl;
types.push_back(0);
for(int i = 0; i < maxx; i++) {
auto iter = cols.find(i+1);
if(iter != cols.end()) {
types.push_back(type[iter->second]);
cl.push_back(iter->first-1);
}
else
types.push_back(0);
};
if(first_time) {
if(process_count*4 > getFreeMem()) {
process_piece = getFreeMem()/4;
}
else
process_piece = process_count;
readbuff = new char[process_piece+1];
d_readbuff.resize(process_piece+1);
cout << "set a piece to " << process_piece << " " << getFreeMem() << endl;
};
thrust::device_vector<unsigned int> ind_cnt(1);
thrust::device_vector<char> sepp(1);
sepp[0] = *sep;
long long int total_processed = 0;
size_t recs_processed = 0;
bool finished = 0;
thrust::device_vector<long long int> dev_pos;
long long int offset;
unsigned int cnt = 1;
const unsigned int max_len = 23;
while(!done) {
auto rb = fread(readbuff, 1, process_piece, file_p);
if(rb < process_piece) {
done = 1;
finished = 1;
fclose(file_p);
};
if(total_processed >= process_count)
done = 1;
thrust::fill(d_readbuff.begin(), d_readbuff.end(),0);
thrust::copy(readbuff, readbuff+rb, d_readbuff.begin());
auto curr_cnt = thrust::count(d_readbuff.begin(), d_readbuff.begin() + rb, '\n') - 1;
if(recs_processed == 0 && first_time) {
rec_sz = curr_cnt;
if(finished)
rec_sz++;
total_max = curr_cnt;
};
if(first_time) {
for(unsigned int i=0; i < columnNames.size(); i++) {
auto colname = columnNames[i];
if (type[colname] == 0) {
d_columns_int[colname].resize(d_columns_int[colname].size() + rec_sz);
h_columns_int[colname].resize(h_columns_int[colname].size() + rec_sz);
}
else
if (type[colname] == 1) {
d_columns_float[colname].resize(d_columns_float[colname].size() + rec_sz);
h_columns_float[colname].resize(h_columns_float[colname].size() + rec_sz);
}
else {
char* c = new char[cnt*rec_sz*char_size[columnNames[i]]];
if(recs_processed > 0) {
memcpy(c, h_columns_char[columnNames[i]], recs_processed*char_size[columnNames[i]]);
delete [] h_columns_char[columnNames[i]];
};
h_columns_char[columnNames[i]] = c;
if(recs_processed == 0) {
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
hipMemset(temp,0,char_size[columnNames[i]]*rec_sz);
d_columns_char[columnNames[i]] = (char*)temp;
};
};
if(recs_processed == 0) {
ind[i] = cl[i];
void* temp;
if(type[columnNames[i]] != 2) {
if(!ts_cols[columnNames[i]]) {
CUDA_SAFE_CALL(hipMalloc((void **) &temp, max_len*rec_sz));
dest_len[i] = max_len;
}
else {
CUDA_SAFE_CALL(hipMalloc((void **) &temp, 23*rec_sz));
dest_len[i] = 23;
}
}
else {
CUDA_SAFE_CALL(hipMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
dest_len[i] = char_size[columnNames[i]];
};
dest[i] = (char*)temp;
};
};
};
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 2) {
hipMemset(dest[i],0,max_len*rec_sz);
}
else {
hipMemset(dest[i],0,char_size[columnNames[i]]*rec_sz);
};
};
if(dev_pos.size() < curr_cnt+1)
dev_pos.resize(curr_cnt+1); // avoid unnecessary reallocations
dev_pos[0] = -1;
thrust::copy_if(thrust::make_counting_iterator((unsigned long long int)0), thrust::make_counting_iterator((unsigned long long int)rb-1),
d_readbuff.begin(), dev_pos.begin()+1, _1 == '\n');
if(!finished) {
if(curr_cnt < rec_sz) {
offset = (dev_pos[curr_cnt] - rb)+1;
//cout << "PATH 1 " << dev_pos[curr_cnt] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = curr_cnt;
}
else {
offset = (dev_pos[rec_sz] - rb)+1;
//cout << "PATH 2 " << dev_pos[rec_sz] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = rec_sz;
};
}
else {
mRecCount = curr_cnt + 1;
};
thrust::counting_iterator<unsigned int> begin(0);
ind_cnt[0] = mColumnCount;
parse_functor ff((const char*)thrust::raw_pointer_cast(d_readbuff.data()),(char**)thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(ind.data()),
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(sepp.data()), thrust::raw_pointer_cast(dev_pos.data()), thrust::raw_pointer_cast(dest_len.data()));
thrust::for_each(begin, begin + mRecCount, ff);
ind_cnt[0] = max_len;
for(int i =0; i < mColumnCount; i++) {
if(type[columnNames[i]] == 0) { //int
thrust::device_ptr<char> p1((char*)dest[i]);
if(p1[4] == '-') { //date
if(!ts_cols[columnNames[i]]) {
gpu_date date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
else {
gpu_tdate date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
}
else { //int
if(decimal[columnNames[i]]) {
thrust::device_vector<unsigned int> scale(1);
scale[0] = decimal_zeroes[columnNames[i]];
gpu_atold atold((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(scale.data()));
thrust::for_each(begin, begin + mRecCount, atold);
}
else {
gpu_atoll atoll_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atoll_ff);
};
};
thrust::copy(d_columns_int[columnNames[i]].begin() + recs_processed, d_columns_int[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_int[columnNames[i]].begin() + recs_processed);
}
else
if(type[columnNames[i]] == 1) {
gpu_atof atof_ff((const char*)dest[i],(double*)thrust::raw_pointer_cast(d_columns_float[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atof_ff);
thrust::copy(d_columns_float[columnNames[i]].begin() + recs_processed, d_columns_float[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_float[columnNames[i]].begin() + recs_processed);
}
else {//char is already done
thrust::device_ptr<char> p1((char*)dest[i]);
hipMemcpy( h_columns_char[columnNames[i]] + char_size[columnNames[i]]*recs_processed, (void *)dest[i] , char_size[columnNames[i]]*mRecCount, hipMemcpyDeviceToHost);
};
};
recs_processed = recs_processed + mRecCount;
cnt++;
};
if(finished) {
for(int i =0; i < mColumnCount; i++) {
if(dest[i]) {
hipFree(dest[i]);
dest[i] = nullptr;
};
};
delete [] readbuff;
};
cout << "processed recs " << recs_processed << " " << getFreeMem() << endl;
first_time = 0;
mRecCount = recs_processed;
return finished;
};
void CudaSet::free() {
for(unsigned int i = 0; i < columnNames.size(); i++ ) {
if(type[columnNames[i]] == 0 && h_columns_int[columnNames[i]].size() ) {
h_columns_int[columnNames[i]].resize(0);
h_columns_int[columnNames[i]].shrink_to_fit();
}
else {
h_columns_float[columnNames[i]].resize(0);
h_columns_float[columnNames[i]].shrink_to_fit();
};
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
deAllocOnDevice();
};
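// alloc_pool: adds one 8*maxRecs-byte device scratch buffer to the pool reused by
// the op() routines.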
void alloc_pool(unsigned int maxRecs) {
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, 8*maxRecs));
alloced_mem.push_back(temp);
};
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
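// compare: op_type codes are 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
// The constant overloads evaluate the predicate once on the host and fill a device
// boolean column with the result; the column overloads evaluate it element-wise on
// the GPU, with p1/p2 scaling integer operands by powers of ten to align decimals.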
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if(d>s)
res = 1;
else
res = 0;
else
if (op_type == 1) // <
if(d<s)
res = 1;
else
res = 0;
else
if (op_type == 6) // >=
if(d>=s)
res = 1;
else
res = 0;
else
if (op_type == 5) // <=
if(d<=s)
res = 1;
else
res = 0;
else
if (op_type == 4)// =
if(d==s)
res = 1;
else
res = 0;
else // !=
if(d!=s)
res = 1;
else
res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(p, p+mRecCount,res,(bool)0);
return thrust::raw_pointer_cast(p);
};
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if ((d-s) > EPSILON)
res = 1;
else
res = 0;
else
if (op_type == 1) // <
if ((s-d) > EPSILON)
res = 1;
else
res = 0;
else
if (op_type == 6) // >=
if (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
else
if (op_type == 5) // <=
if (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
else
if (op_type == 4)// =
if (((d-s) < EPSILON) && ((d-s) > -EPSILON))
res = 1;
else
res = 0;
else // !=
if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(p, p+mRecCount,res,(bool)0);
return thrust::raw_pointer_cast(p);
}
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<float_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_equal_to());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_not_equal_to());
return thrust::raw_pointer_cast(res);
}
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
if(p2)
d = d*(int_type)pow(10, p2);
if (op_type == 2) // >
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
if (op_type == 1) // <
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
if (op_type == 6) // >=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
if (op_type == 5) // <=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
if (op_type == 4)// =
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
if(!p1 && !p2) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
}
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
else
if (op_type == 1) // <
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
else
if (op_type == 6) // >=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater_equal<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
else
if (op_type == 5) // <=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less_equal<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
else
if (op_type == 4)// =
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::equal_to<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
else // !=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::not_equal_to<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr(column2);
thrust::device_ptr<float_type> dev_ptr2 = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr2, long_to_float_type());
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
thrust::device_free(dev_ptr2);
return thrust::raw_pointer_cast(temp);
}
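// op: element-wise arithmetic between columns and/or constants. Results go into a
// scratch buffer taken from the allocation pool; 'reverse' swaps the operand order
// and, in the integer overloads, p1/p2 scale the operands by powers of ten to align
// fixed-point (decimal) columns.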
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::transform(dev_ptr, dev_ptr + mRecCount, temp, long_to_float_type()); // convert the integer column into the float scratch buffer
thrust::device_ptr<float_type> dev_ptr1(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
//cout << "OP " << d << " " << op_type << " " << p1 << " " << p2 << endl;
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
unsigned int d1 = d;
if(p2)
d = d*(unsigned int)pow(10, p2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d1), temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d*(unsigned int)pow(10, p2)), temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d*(unsigned int)pow(10, p2)), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d*(unsigned int)pow(10, p2)), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::divides<int_type>());
}
}
else {
if (op_type.compare("MUL") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
};
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
//cout << "OP " << op_type << " " << p1 << " " << p2 << " " << reverse << endl;
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else
if(p1 && p2) {
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::plus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::divides<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
}
}
else {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::divides<int_type>());
}
}
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
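// Integer column combined with a float constant: the constant is broadcast into the result
// buffer, the column is widened to float, and the requested operation is applied in place.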
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::device_ptr<float_type> dev_ptr1 = thrust::device_malloc<float_type>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr1, long_to_float_type());
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
thrust::device_free(dev_ptr1);
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<float_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return (float_type*)thrust::raw_pointer_cast(temp);
}
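// Loads one segment of a pre-built join index: rebuilds the value -> position dictionary in
// idx_dictionary_int and copies the packed index values to the device (idx_vals). In
// interactive mode the raw index file is cached in pinned host memory via index_buffers.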
char CudaSet::loadIndex(const string index_name, const unsigned int segment)
{
FILE* f;
unsigned int bits_encoded, fit_count, vals_count, sz, real_count;
void* d_str;
string f1 = index_name + "." + to_string(segment);
char res;
//interactive = 0;
if(interactive) {
if(index_buffers.find(f1) == index_buffers.end()) {
f = fopen (f1.c_str(), "rb" );
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
char* buff;
hipHostMalloc(&buff, fileSize, hipHostMallocDefault);
fseek(f, 0, SEEK_SET);
fread(buff, fileSize, 1, f);
fclose(f);
index_buffers[f1] = buff;
};
sz = ((unsigned int*)index_buffers[f1])[0];
idx_dictionary_int[index_name].clear();
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][((int_type*)(index_buffers[f1]+4+8*i))[0]] = i;
};
vals_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[2];
real_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[3];
mRecCount = real_count;
if(idx_vals.count(index_name) == 0) {
hipMalloc((void **) &d_str, (vals_count+2)*int_size);
hipMemcpy( d_str, (void *) &((index_buffers[f1]+4 +8*sz)[0]), (vals_count+2)*int_size, hipMemcpyHostToDevice);
idx_vals[index_name] = (unsigned long long int*)d_str;
};
}
else {
f = fopen (f1.c_str(), "rb" );
fread(&sz, 4, 1, f);
int_type* d_array = new int_type[sz];
idx_dictionary_int[index_name].clear();
fread((void*)d_array, sz*int_size, 1, f);
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][d_array[i]] = i;
};
delete [] d_array;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
mRecCount = real_count;
unsigned long long int* int_array = new unsigned long long int[vals_count+2];
fseek ( f , -16 , SEEK_CUR );
fread((void*)int_array, 1, vals_count*8 + 16, f);
fread(&res, 1, 1, f);
fclose(f);
void* d_str;
hipMalloc((void **) &d_str, (vals_count+2)*int_size);
hipMemcpy( d_str, (void *) int_array, (vals_count+2)*int_size, hipMemcpyHostToDevice);
if(idx_vals.count(index_name))
hipFree(idx_vals[index_name]);
idx_vals[index_name] = (unsigned long long int*)d_str;
}
return res;
}
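// Sets up a CudaSet backed by compressed files on disk: reads the .sort/.presort descriptors,
// records every column's type, precision and compression header, and creates empty
// host/device vectors for later segment loads.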
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name) // compressed data for DIM tables
{
mColumnCount = (unsigned int)nameRef.size();
FILE* f;
string f1;
unsigned int cnt;
char buffer[4000];
string str;
not_compressed = 0;
mRecCount = Recs;
hostRecCount = Recs;
totalRecs = Recs;
load_file_name = file_name;
f1 = file_name + ".sort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
sorted_fields.push(str);
if(verbose)
cout << "segment sorted on " << str << endl;
};
fclose(f);
};
f1 = file_name + ".presort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
presorted_fields.push(str);
if(verbose)
cout << "presorted on " << str << endl;
};
fclose(f);
};
tmp_table = 0;
filtered = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
//f1 = file_name + "." + nameRef.front() + ".0";
//f = fopen (f1.c_str() , "rb" );
//fread((char *)&bytes, 4, 1, f); //need to read metadata such as type and length
//fclose(f);
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if (((typeRef.front()).compare("decimal") == 0) || ((typeRef.front()).compare("int") == 0)) {
f1 = file_name + "." + nameRef.front() + ".0";
f = fopen (f1.c_str() , "rb" );
if(!f) {
cout << "Couldn't find field " << nameRef.front() << endl;
exit(0);
};
for(unsigned int j = 0; j < 6; j++)
fread((char *)&cnt, 4, 1, f);
fclose(f);
compTypes[nameRef.front()] = cnt;
};
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else
if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type >();
}
else
if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
string_map[nameRef.front()] = file_name + "." + nameRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
{
mColumnCount = (unsigned int)nameRef.size();
tmp_table = 0;
filtered = 0;
mRecCount = 0;
hostRecCount = Recs;
segCount = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else
if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type>();
}
else
if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(const size_t RecordCount, const unsigned int ColumnCount)
{
mRecCount = RecordCount;
hostRecCount = RecordCount;
mColumnCount = ColumnCount;
filtered = 0;
};
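// Builds an in-memory result set whose columns are declared by copying type/precision
// information from whichever variable in varNames already contains each selected column.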
void CudaSet::initialize(queue<string> op_sel, const queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = (unsigned int)op_sel.size();
segCount = 1;
not_compressed = 1;
filtered = 0;
col_aliases = op_sel_as;
unsigned int i = 0;
CudaSet *a;
while(!op_sel.empty()) {
for(auto it = varNames.begin(); it != varNames.end(); it++) {
a = it->second;
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end())
break;
};
type[op_sel.front()] = a->type[op_sel.front()];
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
columnNames.push_back(op_sel.front());
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
//h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type>();
}
else
if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
//h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type>();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
};
i++;
op_sel.pop();
};
}
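// Builds the output set of a join: each selected column is described from the left (a) or
// right (b) input, carrying over type, decimal scale and string-dictionary mapping.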
void CudaSet::initialize(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = 0;
queue<string> q_cnt(op_sel);
unsigned int i = 0;
set<string> field_names;
while(!q_cnt.empty()) {
if( std::find(a->columnNames.begin(), a->columnNames.end(), q_cnt.front()) != a->columnNames.end() ||
std::find(b->columnNames.begin(), b->columnNames.end(), q_cnt.front()) != b->columnNames.end()) {
field_names.insert(q_cnt.front());
};
q_cnt.pop();
}
mColumnCount = (unsigned int)field_names.size();
maxRecs = b->maxRecs;
segCount = 1;
filtered = 0;
not_compressed = 1;
col_aliases = op_sel_as;
i = 0;
while(!op_sel.empty()) {
if(std::find(columnNames.begin(), columnNames.end(), op_sel.front()) == columnNames.end()) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end()) {
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
columnNames.push_back(op_sel.front());
type[op_sel.front()] = a->type[op_sel.front()];
ts_cols[op_sel.front()] = a->ts_cols[op_sel.front()];
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(a->string_map.find(op_sel.front()) != a->string_map.end()) {
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
}
else
if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
i++;
}
else
if(std::find(b->columnNames.begin(), b->columnNames.end(), op_sel.front()) != b->columnNames.end()) {
columnNames.push_back(op_sel.front());
cols[i] = op_sel.front();
decimal[op_sel.front()] = b->decimal[op_sel.front()];
type[op_sel.front()] = b->type[op_sel.front()];
ts_cols[op_sel.front()] = b->ts_cols[op_sel.front()];
if (b->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(b->string_map.find(op_sel.front()) != b->string_map.end()) {
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
decimal[op_sel.front()] = b->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = b->decimal_zeroes[op_sel.front()];
}
else
if (b->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = b->char_size[op_sel.front()];
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
i++;
}
}
op_sel.pop();
};
};
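// Mirrors a comparison operator code ( < <-> >, <= <-> >= ) for when the operands are swapped.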
int_type reverse_op(int_type op_type)
{
if (op_type == 2) // >
return 1;
else
if (op_type == 1) // <
return 2;
else
if (op_type == 6) // >=
return 5;
else
if (op_type == 5) // <=
return 6;
else
return op_type;
}
size_t getFreeMem()
{
size_t available, total;
hipMemGetInfo(&available, &total);
return available;
} ;
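// For filtered sets, (re)allocates the shared scratch buffer sized to the source table's
// largest segment; otherwise allocates each requested column directly on the device.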
void allocColumns(CudaSet* a, queue<string> fields)
{
if(a->filtered) {
CudaSet* t;
if(a->filtered)
t = varNames[a->source_name];
else
t = a;
if(int_size*t->maxRecs > alloced_sz) {
if(alloced_sz) {
hipFree(alloced_tmp);
};
hipMalloc((void **) &alloced_tmp, int_size*t->maxRecs);
alloced_sz = int_size*t->maxRecs;
}
}
else {
while(!fields.empty()) {
if(var_exists(a, fields.front()) && !a->onDevice(fields.front())) {
a->allocColumnOnDevice(fields.front(), a->maxRecs);
}
fields.pop();
};
};
}
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, size_t& count)
{
if(!a->onDevice(field)) {
a->allocColumnOnDevice(field, a->maxRecs);
};
if(a->prm_index == 'R') {
mygather(field, a, t, count, a->mRecCount);
}
else {
mycopy(field, a, t, count, t->mRecCount);
a->mRecCount = t->mRecCount;
};
}
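// Second phase of a bit-packed segment copy: widens 8/16/32-bit packed values back to 64 bits
// and adds the per-segment base value from cpy_init_val; float columns get a final
// long-to-float conversion.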
void copyFinalize(CudaSet* a, queue<string> fields, bool ts)
{
set<string> uniques;
if(scratch.size() < a->mRecCount*8)
scratch.resize(a->mRecCount*8);
thrust::device_ptr<int_type> tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front()) && cpy_bits.find(fields.front()) != cpy_bits.end() && (!a->ts_cols[fields.front()] || ts)) {
if(cpy_bits[fields.front()] == 8) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
}
else {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
};
}
else
if(cpy_bits[fields.front()] == 16) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
}
else {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
};
}
else
if(cpy_bits[fields.front()] == 32) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
}
else {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
};
}
else {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
}
else {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
};
};
thrust::constant_iterator<int_type> iter(cpy_init_val[fields.front()]);
if(a->type[fields.front()] != 1) {
thrust::transform(tmp, tmp + a->mRecCount, iter, a->d_columns_int[fields.front()].begin(), thrust::plus<int_type>());
}
else {
thrust::device_ptr<int_type> dest((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(tmp, tmp + a->mRecCount, iter, dest, thrust::plus<int_type>());
thrust::transform(dest, dest+a->mRecCount, a->d_columns_float[fields.front()].begin(), long_to_float());
};
};
uniques.insert(fields.front());
fields.pop();
};
}
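// Copies the requested columns of one segment to the device; for filtered sets the segment is
// filtered first and the destination columns are optionally resized to hold the extra rows.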
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt)
{
//std::clock_t start1 = std::clock();
set<string> uniques;
if(a->filtered) { //filter the segment
if(flt) {
filter_op(a->fil_s, a->fil_f, segment);
};
if(rsz && a->mRecCount) {
queue<string> fields1(fields);
while(!fields1.empty()) {
a->resizeDeviceColumn(a->devRecCount + a->mRecCount, fields1.front());
fields1.pop();
};
a->devRecCount = a->devRecCount + a->mRecCount;
};
};
cpy_bits.clear();
cpy_init_val.clear();
auto f(fields);
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front())) {
if(a->filtered) {
if(a->mRecCount) {
CudaSet *t = varNames[a->source_name];
alloced_switch = 1;
t->CopyColumnToGpu(fields.front(), segment);
gatherColumns(a, t, fields.front(), segment, count);
alloced_switch = 0;
};
}
else {
if(a->mRecCount) {
a->CopyColumnToGpu(fields.front(), segment, count);
};
};
uniques.insert(fields.front());
};
fields.pop();
};
//std::cout<< "copy time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
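// Gathers the rows selected by prm_d from the freshly decompressed segment into the
// destination column, honoring the packed bit width (8/16/32/64) recorded in cpy_bits.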
void mygather(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1 ) {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
};
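// Copies a whole segment without row selection, again switching on the packed bit width.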
void mycopy(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1) {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_int[colname].begin() + offset);
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_int[colname].begin() + offset);
};
}
else {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_float[colname].begin() + offset);
};
};
};
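// Loads the listed columns (plus the join key f2) of 'right' for segments
// [start_segment, end_segment) and returns the number of rows brought onto the device.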
size_t load_queue(queue<string> c1, CudaSet* right, string f2, size_t &rcount,
unsigned int start_segment, unsigned int end_segment, bool rsz, bool flt)
{
queue<string> cc;
while(!c1.empty()) {
if(std::find(right->columnNames.begin(), right->columnNames.end(), c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() ) {
cc.push(c1.front());
};
};
c1.pop();
};
if(std::find(right->columnNames.begin(), right->columnNames.end(), f2) != right->columnNames.end()) {
cc.push(f2);
};
if(right->filtered) {
allocColumns(right, cc);
};
rcount = right->maxRecs;
queue<string> ct(cc);
while(!ct.empty()) {
if(right->filtered && rsz) {
right->mRecCount = 0;
}
else {
right->allocColumnOnDevice(ct.front(), rcount*right->segCount);
};
ct.pop();
};
size_t cnt_r = 0;
right->devRecCount = 0;
for(unsigned int i = start_segment; i < end_segment; i++) {
if(!right->filtered)
copyColumns(right, cc, i, cnt_r, rsz, 0);
else
copyColumns(right, cc, i, cnt_r, rsz, flt);
cnt_r = cnt_r + right->mRecCount;
};
right->mRecCount = cnt_r;
return cnt_r;
}
size_t max_char(CudaSet* a)
{
size_t max_char1 = 8;
for(unsigned int i = 0; i < a->columnNames.size(); i++) {
if(a->type[a->columnNames[i]] == 2) {
if (a->char_size[a->columnNames[i]] > max_char1)
max_char1 = a->char_size[a->columnNames[i]];
}
else
if(a->type[a->columnNames[i]] == 0 && a->string_map.find(a->columnNames[i]) != a->string_map.end()) {
auto s = a->string_map[a->columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if (len > max_char1)
max_char1 = len;
};
};
return max_char1;
};
size_t max_char(CudaSet* a, queue<string> field_names)
{
size_t max_char = 8;
while (!field_names.empty()) {
if (a->type[field_names.front()] == 2) {
if (a->char_size[field_names.front()] > max_char)
max_char = a->char_size[field_names.front()];
};
field_names.pop();
};
return max_char;
};
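// Splits a table into segments whenever its uncompressed size would exceed roughly a third of
// the currently free device memory.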
void setSegments(CudaSet* a, queue<string> cols)
{
size_t mem_available = getFreeMem();
size_t tot_sz = 0;
while(!cols.empty()) {
if(a->type[cols.front()] != 2)
tot_sz = tot_sz + int_size;
else
tot_sz = tot_sz + a->char_size[cols.front()];
cols.pop();
};
if(a->mRecCount*tot_sz > mem_available/3) { //default is 3
a->segCount = (a->mRecCount*tot_sz)/(mem_available/5) + 1;
a->maxRecs = (a->mRecCount/a->segCount)+1;
};
};
void update_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, string SortType, char* tmp, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)tmp, len);
if (SortType.compare("DESC") == 0 )
str_sort_host(tmp, RecCount, permutation, 1, len);
else
str_sort_host(tmp, RecCount, permutation, 0, len);
}
void apply_permutation_char(char* key, unsigned int* permutation, size_t RecCount, char* tmp, unsigned int len)
{
// copy keys to temporary vector
hipMemcpy( (void*)tmp, (void*) key, RecCount*len, hipMemcpyDeviceToDevice);
// permute the keys
str_gather((void*)permutation, RecCount, (void*)tmp, (void*)key, len);
}
void apply_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, char* res, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)res, len);
}
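// Applies the stored filter of set 's' to one segment of its source table 'f': the zone map
// may accept ('A') or reject ('N') the whole segment; otherwise the predicate is evaluated on
// the device and the surviving row numbers are collected into prm_d.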
void filter_op(const char *s, const char *f, unsigned int segment)
{
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
//std::clock_t start1 = std::clock();
if(a->mRecCount == 0 && !a->filtered) {
b = new CudaSet(0,1);
}
else {
if(verbose)
cout << "FILTER " << s << " " << f << " " << getFreeMem() << '\xd';
b = varNames[s];
b->name = s;
b->string_map = a->string_map;
size_t cnt = 0;
b->sorted_fields = a->sorted_fields;
b->ts_cols = a->ts_cols;
allocColumns(a, b->fil_value);
if (b->prm_d.size() == 0) {
b->prm_d.resize(a->maxRecs);
};
cout << endl << "MAP CHECK start " << segment << endl;
char map_check = zone_map_check(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
cout << endl << "MAP CHECK segment " << segment << " " << map_check << endl;
if(map_check == 'R') {
auto old_ph = phase_copy;
phase_copy = 0;
copyColumns(a, b->fil_value, segment, cnt);
phase_copy = old_ph;
bool* res = filter(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
thrust::device_ptr<bool> bp((bool*)res);
b->prm_index = 'R';
b->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 1);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, b->prm_d.begin(), thrust::identity<bool>());
hipFree(res);
}
else {
b->prm_index = map_check;
if(map_check == 'A')
b->mRecCount = a->mRecCount;
else
b->mRecCount = 0;
};
if(segment == a->segCount-1)
a->deAllocOnDevice();
}
if(verbose)
cout << endl << "filter result " << b->mRecCount << endl;
}
size_t load_right(CudaSet* right, string f2, queue<string> op_g, queue<string> op_alt, size_t& rcount, unsigned int start_seg, unsigned int end_seg) {
size_t cnt_r = 0;
//if join is on strings then add integer columns to left and right tables and modify colInd1 and colInd2
// need to allocate all right columns
if(right->not_compressed) {
queue<string> op_alt1;
op_alt1.push(f2);
cnt_r = load_queue(op_alt1, right, "", rcount, start_seg, end_seg, 1, 1);
queue<string> op_alt2;
while(!op_alt.empty()) {
if(f2.compare(op_alt.front())) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), op_alt.front()) != right->columnNames.end()) {
op_alt2.push(op_alt.front());
};
};
op_alt.pop();
};
if(!op_alt2.empty())
cnt_r = load_queue(op_alt2, right, "", rcount, start_seg, end_seg, 0, 0);
}
else {
cnt_r = load_queue(op_alt, right, f2, rcount, start_seg, end_seg, 1, 1);
};
return cnt_r;
};
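// Effectively INSERT INTO f SELECT * FROM s: appends the segments of 's' to table 'f',
// merging string dictionaries for character columns; handles disk-to-disk, memory-to-memory
// and memory-to-disk combinations.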
void insert_records(const char* f, const char* s) {
char buf[4096];
size_t size, maxRecs, cnt = 0;
string str_s, str_d;
if(varNames.find(s) == varNames.end()) {
process_error(3, "couldn't find " + string(s) );
};
CudaSet *a;
a = varNames.find(s)->second;
a->name = s;
if(varNames.find(f) == varNames.end()) {
process_error(3, "couldn't find " + string(f) );
};
CudaSet *b;
b = varNames.find(f)->second;
b->name = f;
// if both source and destination are on disk
cout << "SOURCES " << a->source << ":" << b->source << endl;
if(a->source && b->source) {
for(unsigned int i = 0; i < a->segCount; i++) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
if(a->type[a->columnNames[z]] != 2) {
str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str_d = b->load_file_name + "." + a->columnNames[z] + "." + to_string(b->segCount + i);
cout << str_s << " " << str_d << endl;
FILE* source = fopen(str_s.c_str(), "rb");
FILE* dest = fopen(str_d.c_str(), "wb");
while ((size = fread(buf, 1, sizeof(buf), source)) > 0) {
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
}
else { //merge strings
//read b's strings
str_s = b->load_file_name + "." + b->columnNames[z];
FILE* dest = fopen(str_s.c_str(), "rb");
auto len = b->char_size[b->columnNames[z]];
map<string, unsigned long long int> map_d;
buf[len] = 0;
unsigned long long cnt = 0;
while (fread(buf, len, 1, dest)) {
map_d[buf] = cnt;
cnt++;
};
fclose(dest);
unsigned long long int cct = cnt;
str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i) + ".hash";
str_d = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".hash";
FILE* source = fopen(str_s.c_str(), "rb");
dest = fopen(str_d.c_str(), "wb");
while ((size = fread(buf, 1, sizeof(buf), source)) > 0) {
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
str_s = a->load_file_name + "." + a->columnNames[z];
source = fopen(str_s.c_str(), "rb");
map<unsigned long long int, string> map_s;
buf[len] = 0;
cnt = 0;
while (fread(buf, len, 1, source)) {
map_s[cnt] = buf;
cnt++;
};
fclose(source);
queue<string> op_vx;
op_vx.push(a->columnNames[z]);
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->CopyColumnToGpu(a->columnNames[z], z, 0);
a->CopyColumnToHost(a->columnNames[z]);
str_d = b->load_file_name + "." + b->columnNames[z];
fstream f_file;
f_file.open(str_d.c_str(), ios::out|ios::app|ios::binary);
for(auto j = 0; j < a->mRecCount; j++) {
auto ss = map_s[a->h_columns_int[a->columnNames[z]][j]];
if(map_d.find(ss) == map_d.end()) { //add
f_file.write((char *)ss.c_str(), len);
a->h_columns_int[a->columnNames[z]][j] = cct;
cct++;
}
else {
a->h_columns_int[a->columnNames[z]][j] = map_d[ss];
};
};
f_file.close();
thrust::device_vector<int_type> d_col(a->mRecCount);
thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, d_col.begin());
auto i_name = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".idx";
pfor_compress(thrust::raw_pointer_cast(d_col.data()), a->mRecCount*int_size, i_name, a->h_columns_int[a->columnNames[z]], 0);
};
};
};
if(a->maxRecs > b->maxRecs)
maxRecs = a->maxRecs;
else
maxRecs = b->maxRecs;
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
b->reWriteHeader(b->load_file_name, b->columnNames[i], a->segCount + b->segCount, a->totalRecs + b->totalRecs, maxRecs);
};
}
else
if(!a->source && !b->source) { //if both source and destination are in memory
size_t oldCount = b->mRecCount;
b->resize(a->mRecCount);
for(unsigned int z = 0; z< b->mColumnCount; z++) {
if(b->type[a->columnNames[z]] == 0) {
thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_int[b->columnNames[z]].begin() + oldCount);
}
else
if(b->type[a->columnNames[z]] == 1) {
thrust::copy(a->h_columns_float[a->columnNames[z]].begin(), a->h_columns_float[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_float[b->columnNames[z]].begin() + oldCount);
}
else {
hipMemcpy(b->h_columns_char[b->columnNames[z]] + b->char_size[b->columnNames[z]]*oldCount, a->h_columns_char[a->columnNames[z]], a->char_size[a->columnNames[z]]*a->mRecCount, hipMemcpyHostToHost);
};
};
}
else
if(!a->source && b->source) {
total_segments = b->segCount;
total_count = b->mRecCount;
total_max = b->maxRecs;
queue<string> op_vx;
for(unsigned int i=0; i < a->columnNames.size(); i++)
op_vx.push(a->columnNames[i]);
allocColumns(a, op_vx);
a->resize(a->maxRecs);
for(unsigned int i = 0; i < a->segCount; i++) {
if (a->filtered) {
copyColumns(a, op_vx, i, cnt);
a->CopyToHost(0, a->mRecCount);
};
a->compress(b->load_file_name, 0, 1, i - (a->segCount-1), a->mRecCount, 0);
};
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
b->writeHeader(b->load_file_name, b->columnNames[i], total_segments);
};
};
};
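// Deletes rows matching the current predicate from a disk-resident table: each segment is
// filtered on the GPU, surviving rows are recompressed and written back, and segments left
// empty are removed from disk.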
void delete_records(const char* f) {
CudaSet *a;
a = varNames.find(f)->second;
a->name = f;
size_t totalRemoved = 0;
size_t maxRecs = 0;
if(!a->keep) { // temporary variable
process_error(2, "Delete operator is only applicable to disk based sets\nfor deleting records from derived sets please use filter operator ");
}
else { // read matching segments, delete, compress and write on a disk replacing the original segments
string str, str_old;
queue<string> op_vx;
size_t cnt;
for ( auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
op_vx.push((*it).first);
if (std::find(a->columnNames.begin(), a->columnNames.end(), (*it).first) == a->columnNames.end()) {
if ((*it).second.col_type == 0) {
a->type[(*it).first] = 0;
a->decimal[(*it).first] = 0;
//a->h_columns_int[(*it).first] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
a->h_columns_int[(*it).first] = thrust::host_vector<int_type>();
a->d_columns_int[(*it).first] = thrust::device_vector<int_type>();
}
else
if((*it).second.col_type == 1) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 0;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else
if ((*it).second.col_type == 3) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 1;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else {
a->type[(*it).first] = 2;
a->decimal[(*it).first] = 0;
a->h_columns_char[(*it).first] = nullptr;
a->d_columns_char[(*it).first] = nullptr;
a->char_size[(*it).first] = (*it).second.col_length;
};
a->columnNames.push_back((*it).first);
}
};
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->prm_d.resize(a->maxRecs);
size_t cc = a->mRecCount;
size_t tmp;
void* d;
CUDA_SAFE_CALL(hipMalloc((void **) &d, a->maxRecs*float_size));
unsigned int new_seg_count = 0;
char map_check;
for(unsigned int i = 0; i < a->segCount; i++) {
map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
if(verbose)
cout << "MAP CHECK segment " << i << " " << map_check << endl;
if(map_check != 'N') {
cnt = 0;
copyColumns(a, op_vx, i, cnt);
tmp = a->mRecCount;
if(a->mRecCount) {
bool* res = filter(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
thrust::device_ptr<bool> bp((bool*)res);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, a->prm_d.begin(), thrust::logical_not<bool>());
a->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 0);
hipFree(res);
// cout << "Remained recs count " << a->mRecCount << endl;
if(a->mRecCount > maxRecs)
maxRecs = a->mRecCount;
if (a->mRecCount) {
totalRemoved = totalRemoved + (tmp - a->mRecCount);
if (a->mRecCount == tmp) { //none deleted
if(new_seg_count != i) {
for (auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
auto colname = (*it).first;
str_old = a->load_file_name + "." + colname + "." + to_string(i);
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
}
else { //some deleted
//cout << "writing segment " << new_seg_count << endl;
map<string, col_data> s = data_dict[a->load_file_name];
for ( map<string, col_data>::iterator it=s.begin() ; it != s.end(); ++it ) {
string colname = (*it).first;
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
if(a->type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str, a->h_columns_int[colname], 0);
}
else
if(a->type[colname] == 1) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(a->decimal[colname]) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+a->mRecCount, d_col_dec, float_to_long());
pfor_compress( d, a->mRecCount*float_size, str, a->h_columns_float[colname], 1);
}
else {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col + a->mRecCount, a->h_columns_float[colname].begin());
fstream binary_file(str.c_str(),ios::out|ios::binary);
binary_file.write((char *)&a->mRecCount, 4);
binary_file.write((char *)(a->h_columns_float[colname].data()),a->mRecCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str + ".hash", a->h_columns_int[colname], 0);
};
};
new_seg_count++;
};
}
else {
totalRemoved = totalRemoved + tmp;
};
}
}
else {
if(new_seg_count != i) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str_old = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str = a->load_file_name + "." + a->columnNames[z] + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
maxRecs = a->maxRecs;
};
};
if (new_seg_count < a->segCount) {
for(unsigned int i = new_seg_count; i < a->segCount; i++) {
//cout << "delete segment " << i << endl;
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str = a->load_file_name + "." + a->columnNames[z];
str += "." + to_string(i);
remove(str.c_str());
};
};
};
for(unsigned int i = 0; i < a->columnNames.size(); i++) {
a->reWriteHeader(a->load_file_name, a->columnNames[i], new_seg_count, a->totalRecs-totalRemoved, maxRecs);
};
a->mRecCount = cc;
a->prm_d.resize(0);
a->segCount = new_seg_count;
a->deAllocOnDevice();
hipFree(d);
};
};
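// Serializes the global data dictionary (table -> column -> type/length) to a binary file;
// load_col_data below restores it.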
void save_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len;
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
size_t len = data_dict.size();
binary_file.write((char *)&len, 8);
for (auto it=data_dict.begin() ; it != data_dict.end(); ++it ) {
str_len = (*it).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*it).first.data(), str_len);
map<string, col_data> s = (*it).second;
size_t len1 = s.size();
binary_file.write((char *)&len1, 8);
for (auto sit=s.begin() ; sit != s.end(); ++sit ) {
str_len = (*sit).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*sit).first.data(), str_len);
binary_file.write((char *)&(*sit).second.col_type, 4);
binary_file.write((char *)&(*sit).second.col_length, 4);
};
};
binary_file.close();
}
void load_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len, recs, len1;
string str1, str2;
char buffer[4000];
unsigned int col_type, col_length;
fstream binary_file;
binary_file.open(file_name.c_str(),ios::in|ios::binary);
if(binary_file.is_open()) {
binary_file.read((char*)&recs, 8);
for(unsigned int i = 0; i < recs; i++) {
binary_file.read((char*)&str_len, 8);
binary_file.read(buffer, str_len);
str1.assign(buffer, str_len);
binary_file.read((char*)&len1, 8);
for(unsigned int j = 0; j < len1; j++) {
binary_file.read((char*)&str_len, 8);
binary_file.read(buffer, str_len);
str2.assign(buffer, str_len);
binary_file.read((char*)&col_type, 4);
binary_file.read((char*)&col_length, 4);
data_dict[str1][str2].col_type = col_type;
data_dict[str1][str2].col_length = col_length;
//cout << "data DICT " << str1 << " " << str2 << " " << col_type << " " << col_length << endl;
};
};
binary_file.close();
}
else {
cout << "Couldn't open data dictionary" << endl;
};
}
bool var_exists(CudaSet* a, string name) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), name) != a->columnNames.end())
return 1;
else
return 0;
}
int file_exist (const char *filename)
{
std::ifstream infile(filename);
return infile.good();
}
bool check_bitmap_file_exist(CudaSet* left, CudaSet* right)
{
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 0;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
return bitmaps_exist;
}
bool check_bitmaps_exist(CudaSet* left, CudaSet* right)
{
//check if there are join bitmap indexes
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 1;
return 1;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
if(bitmaps_exist) {
while(!right->fil_nums.empty() ) {
left->fil_nums.push(right->fil_nums.front());
right->fil_nums.pop();
};
while(!right->fil_nums_precision.empty() ) {
left->fil_nums_precision.push(right->fil_nums_precision.front());
right->fil_nums_precision.pop();
};
while(!right->fil_nums_f.empty() ) {
left->fil_nums_f.push(right->fil_nums_f.front());
right->fil_nums_f.pop();
};
while(!right->fil_value.empty() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), right->fil_value.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + right->fil_value.front();
left->fil_value.push(fname);
}
else
left->fil_value.push(right->fil_value.front());
right->fil_value.pop();
};
bool add_and = 1;
if(left->fil_type.empty())
add_and = 0;
while(!right->fil_type.empty() ) {
left->fil_type.push(right->fil_type.front());
right->fil_type.pop();
};
if(add_and) {
left->fil_type.push("AND");
};
return 1;
}
else {
return 0;
};
}
void check_sort(const string str, const char* rtable, const char* rid)
{
CudaSet* right = varNames.find(rtable)->second;
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::app);
binary_file.write((char *)&right->sort_check, 1);
binary_file.close();
}
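// Refines a sort permutation by actual string values: gathers the dictionary codes through
// the current permutation, reads the corresponding strings from the dictionary file, and
// string-sorts them on the host or the device.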
void update_char_permutation(CudaSet* a, string colname, unsigned int* raw_ptr, string ord, void* temp, bool host)
{
auto s = a->string_map[colname];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
a->h_columns_char[colname] = new char[a->mRecCount*len];
memset(a->h_columns_char[colname], 0, a->mRecCount*len);
thrust::device_ptr<unsigned int> perm(raw_ptr);
thrust::device_ptr<int_type> temp_int((int_type*)temp);
thrust::gather(perm, perm+a->mRecCount, a->d_columns_int[colname].begin(), temp_int);
//for(int z = 0 ; z < a->mRecCount; z++) {
//cout << "Init vals " << a->d_columns_int[colname][z] << " " << perm[z] << " " << temp_int[z] << endl;
//};
//cout << "sz " << a->h_columns_int[colname].size() << " " << a->d_columns_int[colname].size() << " " << len << endl;
hipMemcpy(thrust::raw_pointer_cast(a->h_columns_int[colname].data()), temp, 8*a->mRecCount, hipMemcpyDeviceToHost);
FILE *f;
f = fopen(a->string_map[colname].c_str(), "rb");
for(int z = 0 ; z < a->mRecCount; z++) {
fseek(f, a->h_columns_int[colname][z] * len, SEEK_SET);
fread(a->h_columns_char[colname] + z*len, 1, len, f);
};
fclose(f);
if(!host) {
void *d;
hipMalloc((void **) &d, a->mRecCount*len);
a->d_columns_char[colname] = (char*)d;
hipMemcpy(a->d_columns_char[colname], a->h_columns_char[colname], len*a->mRecCount, hipMemcpyHostToDevice);
if (ord.compare("DESC") == 0 )
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
hipFree(d);
}
else {
if (ord.compare("DESC") == 0 )
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
};
}
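// Dictionary-encodes an integer column and bit-packs the codes into 64-bit words, writing the
// dictionary and the packed stream to 'file_name'.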
void compress_int(const string file_name, const thrust::host_vector<int_type>& res)
{
std::vector<unsigned int> dict_val;
unsigned int bits_encoded;
set<int_type> dict_s;
map<int_type, unsigned int> d_ordered;
for (unsigned int i = 0 ; i < res.size(); i++) {
int_type f = res[i];
dict_s.insert(f);
};
unsigned int i = 0;
for (auto it = dict_s.begin(); it != dict_s.end(); it++) {
d_ordered[*it] = i++;
};
for (unsigned int i = 0 ; i < res.size(); i++) {
int_type f = res[i];
dict_val.push_back(d_ordered[f]);
};
bits_encoded = (unsigned int)ceil(log2(double(d_ordered.size()+1)));
//cout << "bits " << bits_encoded << endl;
unsigned int sz = (unsigned int)d_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&sz, 4);
for (auto it = d_ordered.begin(); it != d_ordered.end(); it++) {
binary_file.write((char*)(&(it->first)), int_size);
};
unsigned int fit_count = 64/bits_encoded;
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
unsigned int vals_count = (unsigned int)dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = (unsigned int)dict_val.size();
binary_file.write((char *)&real_count, 4);
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, int_size);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
binary_file.close();
};
int_type* get_vec(CudaSet* a, string s1_val, stack<int_type*>& exe_vectors) {
int_type* t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end())
t = a->get_int_by_name(s1_val);
else {
t = exe_vectors.top();
exe_vectors.pop();
}
return t;
};
int_type* get_host_vec(CudaSet* a, string s1_val, stack<int_type*>& exe_vectors) {
int_type* t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end()) {
t = a->get_host_int_by_name(s1_val);
}
else {
t = exe_vectors.top();
thrust::device_ptr<int_type> st1((int_type*)t);
for(int z = 0; z < 10; z++)
cout << "RESVEC " << st1[z] << endl;
exe_vectors.pop();
}
return t;
};
unsigned int get_decimals(CudaSet* a, string s1_val, stack<unsigned int>& exe_precision) {
unsigned int t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end())
t = a->decimal_zeroes[s1_val];
else {
t = exe_precision.top();
exe_precision.pop();
}
return t;
};
#ifdef _WIN64
size_t getTotalSystemMemory()
{
MEMORYSTATUSEX status;
status.dwLength = sizeof(status);
GlobalMemoryStatusEx(&status);
return status.ullTotalPhys;
}
#else
size_t getTotalSystemMemory()
{
long pages = sysconf(_SC_PHYS_PAGES);
long page_size = sysconf(_SC_PAGE_SIZE);
return pages * page_size;
}
#endif
| 0634d7c71616d82668c411d4430c436f37208f7c.cu | /*
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <algorithm>
#include <functional>
#include <numeric>
#include <ctime>
#include <time.h>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#include "sorts.cu"
#include "filter.h"
#include "callbacks.h"
#include "zone_map.h"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#define fseek(S, S1, S2) _fseeki64(S, S1, S2)
#include <windows.h>
#else
#include <unistd.h>
#endif
using namespace std;
using namespace thrust::placeholders;
size_t total_count = 0, total_max;
clock_t tot;
unsigned int total_segments = 0, old_segments;
size_t process_count;
size_t alloced_sz = 0;
bool fact_file_loaded = 1;
bool verbose;
bool interactive, ssd, delta, star;
unsigned int prs;
void* d_v = nullptr;
void* s_v = nullptr;
queue<string> op_sort;
queue<string> op_presort;
queue<string> op_type;
bool op_case = 0;
string grp_val;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<unsigned int> op_nums_precision;
queue<string> col_aliases;
map<string, map<string, col_data> > data_dict;
unordered_map<string, unordered_map<unsigned long long int, size_t> > char_hash;
map<string, char*> index_buffers;
map<string, unsigned long long int*> idx_vals;
map<string, char*> buffers;
map<string, size_t> buffer_sizes;
size_t total_buffer_size;
queue<string> buffer_names;
void* alloced_tmp;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string, unsigned int> cpy_bits;
map<string, long long int> cpy_init_val;
char* readbuff = nullptr;
thrust::device_vector<unsigned int> rcol_matches;
thrust::device_vector<int_type> rcol_dev;
struct f_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON) || ((x-y) < -EPSILON);
}
};
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
template <typename T>
struct power_functor : public thrust::unary_function<T,T>
{
unsigned int a;
__host__ __device__
power_functor(unsigned int a_) {
a = a_;
}
__host__ __device__
T operator()(T x)
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
struct is_zero
{
__host__ __device__
bool operator()(const int &x)
{
return x == 0;
}
};
int get_utc_offset() {
time_t zero = 24*60*60L;
struct tm * timeptr;
int gmtime_hours;
/* get the local time for Jan 2, 1900 00:00 UTC */
timeptr = localtime( &zero );
gmtime_hours = timeptr->tm_hour;
/* if the local time is the "day before" the UTC, subtract 24 hours
from the hours to get the UTC offset */
if( timeptr->tm_mday < 2 )
gmtime_hours -= 24;
return gmtime_hours;
}
/*
the utc analogue of mktime,
(much like timegm on some systems)
*/
time_t tm_to_time_t_utc( struct tm * timeptr ) {
/* gets the epoch time relative to the local time zone,
and then adds the appropriate number of seconds to make it UTC */
return mktime( timeptr ) + get_utc_offset() * 3600;
}
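/*
Illustrative usage of tm_to_time_t_utc (sketch only): interpreting a
broken-down time as UTC rather than local time, e.g.
    struct tm t = {};
    t.tm_year = 115; t.tm_mon = 0; t.tm_mday = 1;   // 2015-01-01
    time_t epoch = tm_to_time_t_utc(&t);            // midnight UTC
mktime() alone would shift the result by the local UTC offset, which is what
the get_utc_offset() correction above compensates for.
*/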
/*class power_functor {
unsigned int a;
public:
power_functor(unsigned int a_) { a = a_; }
__host__ __device__ int_type operator()(int_type x) const
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
*/
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void write_compressed_char(string file_name, unsigned int index, size_t mCount);
size_t getFreeMem();
size_t getTotalSystemMemory();
void process_error(int severity, string err);
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
source = 1;
text_source = 1;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name, unsigned int max)
: mColumnCount(0), mRecCount(0)
{
maxRecs = max;
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
source = 1;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(const size_t RecordCount, const unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(queue<string> op_sel, const queue<string> op_sel_as)
{
initialize(op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b, op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::~CudaSet()
{
free();
};
void CudaSet::allocColumnOnDevice(string colname, size_t RecordCount)
{
if (type[colname] != 1 ) {
d_columns_int[colname].resize(RecordCount);
}
else
d_columns_float[colname].resize(RecordCount);
};
void CudaSet::resize_join(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else
h_columns_float[columnNames[i]].resize(mRecCount);
};
};
void CudaSet::resize(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else {
h_columns_float[columnNames[i]].resize(mRecCount);
}
};
};
void CudaSet::deAllocColumnOnDevice(string colname)
{
if (type[colname] != 1 && !d_columns_int.empty() && d_columns_int.find(colname) != d_columns_int.end()) {
if(d_columns_int[colname].size() > 0) {
d_columns_int[colname].resize(0);
d_columns_int[colname].shrink_to_fit();
};
}
else
if (type[colname] == 1 && !d_columns_float.empty()) {
if (d_columns_float[colname].size() > 0) {
d_columns_float[colname].resize(0);
d_columns_float[colname].shrink_to_fit();
};
};
};
void CudaSet::allocOnDevice(size_t RecordCount)
{
for(unsigned int i=0; i < columnNames.size(); i++)
allocColumnOnDevice(columnNames[i], RecordCount);
};
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i < columnNames.size(); i++) {
deAllocColumnOnDevice(columnNames[i]);
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
for (auto it=d_columns_int.begin(); it != d_columns_int.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
for (auto it=d_columns_float.begin(); it != d_columns_float.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
if(filtered) { // dealloc the source
if(varNames.find(source_name) != varNames.end()) {
varNames[source_name]->deAllocOnDevice();
};
};
};
void CudaSet::resizeDeviceColumn(size_t RecCount, string colname)
{
if (type[colname] != 1) {
d_columns_int[colname].resize(RecCount);
}
else
d_columns_float[colname].resize(RecCount);
};
void CudaSet::resizeDevice(size_t RecCount)
{
for(unsigned int i=0; i < columnNames.size(); i++) {
resizeDeviceColumn(RecCount, columnNames[i]);
};
};
bool CudaSet::onDevice(string colname)
{
if (type[colname] != 1) {
if (!d_columns_int.empty() && d_columns_int[colname].size())
return 1;
}
else
if (!d_columns_float.empty() && d_columns_float[colname].size())
return 1;
return 0;
}
CudaSet* CudaSet::copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->not_compressed = not_compressed;
a->segCount = segCount;
a->maxRecs = maxRecs;
a->columnNames = columnNames;
a->ts_cols = ts_cols;
a->cols = cols;
a->type = type;
a->char_size = char_size;
a->decimal = decimal;
a->decimal_zeroes = decimal_zeroes;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(a->type[columnNames[i]] == 0) {
a->d_columns_int[columnNames[i]] = thrust::device_vector<int_type>();
a->h_columns_int[columnNames[i]] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >();
}
else
if(a->type[columnNames[i]] == 1) {
a->d_columns_float[columnNames[i]] = thrust::device_vector<float_type>();
a->h_columns_float[columnNames[i]] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >();
}
else {
a->h_columns_char[columnNames[i]] = nullptr;
a->d_columns_char[columnNames[i]] = nullptr;
};
};
a->load_file_name = load_file_name;
a->mRecCount = 0;
return a;
}
int_type CudaSet::readSsdSegmentsFromFile(unsigned int segNum, string colname, size_t offset, thrust::host_vector<unsigned int>& prm_vh, CudaSet* dest)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
unsigned int cnt, bits;
int_type lower_val;
unsigned short int val_s_r[4096/2];
char val_c_r[4096];
unsigned int val_i_r[4096/4];
unsigned long long int val_l_r[4096/8];
unsigned int idx;
bool idx_set = 0;
fread(&cnt, 4, 1, f);
fread(&lower_val, 8, 1, f);
fseek(f, cnt - (8+4) + 32, SEEK_CUR);
fread(&bits, 4, 1, f);
//cout << "lower_val bits " << lower_val << " " << bits << endl;
if(type[colname] == 0) {
//cout << "lower_val bits " << lower_val << " " << bits << endl;
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
if(bits == 8) {
fread(&val_c_r[0], 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_c_r[0];
}
else
if(bits == 16) {
fread(&val_s_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_s_r[0];
}
if(bits == 32) {
fread(&val_i_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_i_r[0];
}
if(bits == 64) {
fread(&val_l_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_l_r[0];
}
}
else {
if(bits == 8) {
dest->h_columns_int[colname][i + offset] = val_c_r[prm_vh[i]-idx];
}
else
if(bits == 16) {
dest->h_columns_int[colname][i + offset] = val_s_r[prm_vh[i]-idx];
}
if(bits == 32) {
dest->h_columns_int[colname][i + offset] = val_i_r[prm_vh[i]-idx];
}
if(bits == 64) {
dest->h_columns_int[colname][i + offset] = val_l_r[prm_vh[i]-idx];
}
};
};
}
else
if(type[colname] == 1) {
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
fread(val_c_r, 4096, 1, f);
memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[0], bits/8);
}
else {
memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[(prm_vh[i]-idx)*(bits/8)], bits/8);
};
};
}
else {
//no strings in fact tables
};
fclose(f);
return lower_val;
}
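/*
Note: the on-disk layout assumed by readSsdSegmentsFromFile (and by
readSsdSegmentsFromFileR below) is inferred from the fread/fseek offsets, so
treat it as a sketch rather than a format spec:
  [4B compressed size cnt][8B lower_val][ ... ][4B bits]
with fixed-width values of bits/8 bytes each starting at byte offset 24,
presumably offsets from the returned lower_val base. A single 4096-byte read
caches 4096/(bits/8) consecutive values, and the idx/idx_set logic only
re-seeks when a requested row falls outside the cached window.
*/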
int_type CudaSet::readSsdSegmentsFromFileR(unsigned int segNum, string colname, thrust::host_vector<unsigned int>& prm_vh, thrust::host_vector<unsigned int>& dest)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
unsigned int cnt, bits;
int_type lower_val;
fread(&cnt, 4, 1, f);
fread(&lower_val, 8, 1, f);
fseek(f, cnt - (8+4) + 32, SEEK_CUR);
fread(&bits, 4, 1, f);
unsigned short int val_s_r[4096/2];
char val_c_r[4096];
unsigned int val_i_r[4096/4];
unsigned long long int val_l_r[4096/8];
unsigned int idx;
bool idx_set = 0;
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
if(bits == 8) {
fread(val_c_r, 4096, 1, f);
dest[i] = val_c_r[0];
}
else
if(bits == 16) {
fread(val_s_r, 4096, 1, f);
dest[i] = val_s_r[0];
}
if(bits == 32) {
fread(val_i_r, 4096, 1, f);
dest[i] = val_i_r[0];
}
if(bits == 64) {
fread(val_l_r, 4096, 1, f);
dest[i] = val_l_r[0];
}
}
else {
if(bits == 8) {
dest[i] = val_c_r[prm_vh[i]-idx];
}
else
if(bits == 16) {
dest[i] = val_s_r[prm_vh[i]-idx];
}
if(bits == 32) {
dest[i] = val_i_r[prm_vh[i]-idx];
}
if(bits == 64) {
dest[i] = val_l_r[prm_vh[i]-idx];
}
};
};
fclose(f);
return lower_val;
}
std::clock_t tot_disk;
void CudaSet::readSegmentsFromFile(unsigned int segNum, string colname)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
if(type[colname] == 2)
f1 = f1 + ".idx";
std::clock_t start1 = std::clock();
if(interactive) { //check if data are in buffers
if(buffers.find(f1) == buffers.end()) { // add data to buffers
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
process_error(3, "Error opening " + string(f1) +" file " );
};
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
while(total_buffer_size + fileSize > getTotalSystemMemory() && !buffer_names.empty()) { //free some buffers
//delete [] buffers[buffer_names.front()];
cudaFreeHost(buffers[buffer_names.front()]);
total_buffer_size = total_buffer_size - buffer_sizes[buffer_names.front()];
buffer_sizes.erase(buffer_names.front());
buffers.erase(buffer_names.front());
buffer_names.pop();
};
fseek(f, 0, SEEK_SET);
char* buff;
cudaHostAlloc((void**) &buff, fileSize,cudaHostAllocDefault);
fread(buff, fileSize, 1, f);
fclose(f);
buffers[f1] = buff;
buffer_sizes[f1] = fileSize;
buffer_names.push(f1);
total_buffer_size = total_buffer_size + fileSize;
cout << "added buffer " << f1 << " " << fileSize << endl;
};
// get data from buffers
if(type[colname] != 1) {
unsigned int cnt = ((unsigned int*)buffers[f1])[0];
if(cnt > h_columns_int[colname].size()/8 + 10)
h_columns_int[colname].resize(cnt/8 + 10);
}
else {
unsigned int cnt = ((unsigned int*)buffers[f1])[0];
if(cnt > h_columns_float[colname].size()/8 + 10)
h_columns_float[colname].resize(cnt/8 + 10);
}
}
else {
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
if(type[colname] != 1) {
if(1 > h_columns_int[colname].size())
h_columns_int[colname].resize(1);
fread(h_columns_int[colname].data(), 4, 1, f);
unsigned int cnt = ((unsigned int*)(h_columns_int[colname].data()))[0];
if(cnt/8+10 > h_columns_int[colname].size()) {
h_columns_int[colname].resize(cnt + 10);
};
size_t rr = fread((unsigned int*)(h_columns_int[colname].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
char buf[1024];
sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
process_error(3, string(buf));
};
}
else {
if(1 > h_columns_float[colname].size())
h_columns_float[colname].resize(1);
fread(h_columns_float[colname].data(), 4, 1, f);
unsigned int cnt = ((unsigned int*)(h_columns_float[colname].data()))[0];
if(cnt/8+10 > h_columns_float[colname].size())
h_columns_float[colname].resize(cnt + 10);
size_t rr = fread((unsigned int*)(h_columns_float[colname].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
char buf[1024];
sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
process_error(3, string(buf));
};
}
fclose(f);
};
tot_disk = tot_disk + (std::clock() - start1);
};
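/*
Note on readSegmentsFromFile: in interactive mode the raw segment files are
cached in page-locked host memory (cudaHostAlloc), keyed by file name in
`buffers`, with simple FIFO eviction via `buffer_names` once the cache would
exceed getTotalSystemMemory(). Otherwise the segment is read directly into
h_columns_int / h_columns_float, whose first 4 bytes hold the compressed byte
count cnt followed by cnt+52 bytes of payload.
*/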
void CudaSet::CopyColumnToGpu(string colname, unsigned int segment, size_t offset)
{
if(not_compressed) {
// calculate how many records we need to copy
if(segment < segCount-1) {
mRecCount = maxRecs;
}
else {
mRecCount = hostRecCount - maxRecs*(segCount-1);
};
if(type[colname] != 1) {
if(!alloced_switch) {
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_columns_int[colname].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
else {
if(!alloced_switch) {
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_columns_float[colname].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
}
else {
readSegmentsFromFile(segment,colname);
if(!d_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
string f1;
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(segment) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(segment);
};
if(type[colname] != 1) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), buffers[f1], d_v, s_v, colname);
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
};
}
else {
if(decimal[colname]) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + offset));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin(), long_to_float());
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
};
//for(int i = 0; i < mRecCount;i++)
//cout << "DECOMP " << (float_type)(d_col_int[i]) << " " << d_col_float[i] << endl;
};
}
//else // uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
}
};
}
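/*
Note: CopyColumnToGpu(colname, segment, offset) has two paths. For
not_compressed sets it is a plain host-to-device thrust::copy of the segment
slice; otherwise the segment is (re)read from disk or the buffer cache and
pfor-decompressed straight into the destination column (or into alloced_tmp
when alloced_switch is set). Decimal float columns are decompressed as scaled
integers and converted with long_to_float() unless phase_copy is set.
*/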
void CudaSet::CopyColumnToGpu(string colname) // copy all segments
{
if(not_compressed) {
if(type[colname] != 1)
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mRecCount, d_columns_int[colname].begin());
else
thrust::copy(h_columns_float[colname].begin(), h_columns_float[colname].begin() + mRecCount, d_columns_float[colname].begin());
}
else {
if(!d_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
size_t cnt = 0;
string f1;
for(unsigned int i = 0; i < segCount; i++) {
readSegmentsFromFile(i,colname);
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(i) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(i);
};
if(type[colname] == 0) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), buffers[f1], d_v, s_v, colname);
};
}
else
if(type[colname] == 1) {
if(decimal[colname]) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin() + cnt, long_to_float());
};
}
// else uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
};
cnt = cnt + mRecCount;
//totalRecs = totals + mRecCount;
};
mRecCount = cnt;
};
}
void CudaSet::CopyColumnToHost(string colname, size_t offset, size_t RecCount)
{
if(type[colname] != 1) {
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin() + RecCount, h_columns_int[colname].begin() + offset);
}
else
thrust::copy(d_columns_float[colname].begin(), d_columns_float[colname].begin() + RecCount, h_columns_float[colname].begin() + offset);
}
void CudaSet::CopyColumnToHost(string colname)
{
CopyColumnToHost(colname, 0, mRecCount);
}
void CudaSet::CopyToHost(size_t offset, size_t count)
{
for(unsigned int i = 0; i < columnNames.size(); i++) {
CopyColumnToHost(columnNames[i], offset, count);
};
}
float_type* CudaSet::get_float_type_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_float[name].data());
}
int_type* CudaSet::get_int_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_int[name].data());
}
float_type* CudaSet::get_host_float_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_float[name].data());
}
int_type* CudaSet::get_host_int_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_int[name].data());
}
void CudaSet::GroupBy(stack<string> columnRef)
{
thrust::device_vector<bool> grp_dev(mRecCount);
thrust::fill(grp_dev.begin(), grp_dev.end(), 0);
if(scratch.size() < mRecCount)
scratch.resize(mRecCount*sizeof(bool));
thrust::device_ptr<bool> d_group((bool*)thrust::raw_pointer_cast(scratch.data()));
d_group[mRecCount-1] = 0;
for(int i = 0; i < columnRef.size(); columnRef.pop()) {
unsigned int bits;
if(cpy_bits.empty())
bits = 0;
else
bits = cpy_bits[columnRef.top()];
if(bits == 8) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned char>());
}
else
if(bits == 16) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned short int>());
}
else
if(bits == 32) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned int>());
}
else {
thrust::transform(d_columns_int[columnRef.top()].begin(), d_columns_int[columnRef.top()].begin() + mRecCount - 1,
d_columns_int[columnRef.top()].begin()+1, d_group, thrust::not_equal_to<int_type>());
};
thrust::transform(d_group, d_group+mRecCount, grp_dev.begin(), grp_dev.begin(), thrust::logical_or<bool>());
};
grp_count = thrust::count(grp_dev.begin(), grp_dev.end(), 1) + 1;
//cout << "grp count " << grp_count << endl;
grp.resize(grp_count);
if(grp_count > 1)
thrust::copy_if(thrust::make_counting_iterator((unsigned int)1), thrust::make_counting_iterator((unsigned int)grp_dev.size()),
grp_dev.begin(), grp.begin()+1, thrust::identity<bool>());
grp[0] = 0;
};
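/*
Note on GroupBy: group boundaries are detected by comparing each key column
against itself shifted by one row (thrust::not_equal_to) and OR-ing the
per-column results into grp_dev, so grp ends up holding the starting row of
every distinct key run. Example with a single key column 1 1 2 2 2 3: the
pairwise comparison yields 0 1 0 0 1, grp_count = 3 and grp = [0, 2, 5].
*/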
void CudaSet::addDeviceColumn(int_type* col, string colname, size_t recCount)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 0;
d_columns_int[colname] = thrust::device_vector<int_type>(recCount);
h_columns_int[colname] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >(recCount);
}
else { // already exists, may need to resize it
if(d_columns_int[colname].size() < recCount) {
d_columns_int[colname].resize(recCount);
};
if(h_columns_int[colname].size() < recCount) {
h_columns_int[colname].resize(recCount);
};
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[colname].begin());
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin()+recCount, h_columns_int[colname].begin());
};
void CudaSet::addDeviceColumn(float_type* col, string colname, size_t recCount, bool is_decimal)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 1;
d_columns_float[colname] = thrust::device_vector<float_type>(recCount);
h_columns_float[colname] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >(recCount);
}
else { // already exists, may need to resize it
if(d_columns_float[colname].size() < recCount)
d_columns_float[colname].resize(recCount);
if(h_columns_float[colname].size() < recCount)
h_columns_float[colname].resize(recCount);
};
decimal[colname] = is_decimal;
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[colname].begin());
};
void CudaSet::gpu_perm(queue<string> sf, thrust::device_vector<unsigned int>& permutation) {
permutation.resize(mRecCount);
thrust::sequence(permutation.begin(), permutation.begin() + mRecCount,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation.data());
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, mRecCount*8));
string sort_type = "ASC";
while(!sf.empty()) {
if (type[sf.front()] == 0) {
update_permutation(d_columns_int[sf.front()], raw_ptr, mRecCount, sort_type, (int_type*)temp, 64);
}
else
if (type[sf.front()] == 1) {
update_permutation(d_columns_float[sf.front()], raw_ptr, mRecCount, sort_type, (float_type*)temp, 64);
}
else {
thrust::host_vector<unsigned int> permutation_h = permutation;
char* temp1 = new char[char_size[sf.front()]*mRecCount];
update_permutation_char_host(h_columns_char[sf.front()], permutation_h.data(), mRecCount, sort_type, temp1, char_size[sf.front()]);
delete [] temp1;
permutation = permutation_h;
};
sf.pop();
};
cudaFree(temp);
}
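/*
Note on gpu_perm: the permutation is built by running update_permutation once
per sort key. Assuming update_permutation performs a stable sort by its key,
the key processed last ends up most significant, i.e. the front of the queue
acts as the least significant key. Char columns are permuted on the host via
update_permutation_char_host since their raw fixed-width strings live in
h_columns_char.
*/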
void CudaSet::compress(string file_name, size_t offset, unsigned int check_type, unsigned int check_val, size_t mCount, const bool append)
{
string str(file_name);
thrust::device_vector<unsigned int> permutation;
long long int oldCount;
bool int_check = 0;
void* d;
CUDA_SAFE_CALL(cudaMalloc((void **) &d, mCount*float_size));
total_count = total_count + mCount;
if (mCount > total_max && op_sort.empty()) {
total_max = mCount;
};
if(!total_segments && append) {
string s= file_name + "." + columnNames[0] + ".header";
ifstream binary_file(s.c_str(),ios::binary);
if(binary_file) {
binary_file.read((char *)&oldCount, 8);
binary_file.read((char *)&total_segments, 4);
binary_file.read((char *)&maxRecs, 4);
if(total_max < maxRecs)
total_max = maxRecs;
binary_file.close();
total_count = oldCount + mCount;
};
};
if(!op_sort.empty()) { //sort the segment
gpu_perm(op_sort, permutation);
};
// here we need to check for partitions and if partition_count > 0 -> create partitions
if(mCount < partition_count || partition_count == 0)
partition_count = 1;
unsigned int partition_recs = mCount/partition_count;
if(!op_sort.empty()) {
if(total_max < partition_recs)
total_max = partition_recs;
};
total_segments++;
old_segments = total_segments;
size_t new_offset;
for(unsigned int i = 0; i < columnNames.size(); i++) {
std::clock_t start1 = std::clock();
string colname = columnNames[i];
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
new_offset = 0;
if(type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_int[colname].begin(), d_col);
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1) {
pfor_compress( (int_type*)d + new_offset, partition_recs*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*int_size, str, h_columns_int[colname], 0);
};
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
if(!int_check) {
thrust::copy(h_columns_int[colname].begin() + offset, h_columns_int[colname].begin() + offset + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( thrust::raw_pointer_cast(d_columns_int[colname].data()), mCount*int_size, str, h_columns_int[colname], 0);
};
};
}
else
if(type[colname] == 1) {
if(decimal[colname]) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
pfor_compress( (int_type*)d + new_offset, partition_recs*float_size, str, h_columns_float[colname], 1);
else
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*float_size, str, h_columns_float[colname], 1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
thrust::copy(h_columns_float[colname].begin() + offset, h_columns_float[colname].begin() + offset + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[colname], 1);
};
}
else { // do not compress -- float
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col+mRecCount, h_columns_float[colname].begin());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
unsigned int curr_cnt;
if (p < partition_count - 1)
curr_cnt = partition_recs;
else
curr_cnt = mCount - partition_recs*p;
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&curr_cnt, 4);
binary_file.write((char *)(h_columns_float[colname].data() + new_offset),curr_cnt*float_size);
new_offset = new_offset + partition_recs;
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[colname].data() + offset),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
};
}
else { //char
//populate char_hash
if(append && char_hash[colname].size() == 0) {
string s= file_name + "." + colname;
ifstream binary_file(s.c_str(), ios::binary | ios::ate);
if(binary_file) {
auto sz = binary_file.tellg();
binary_file.seekg(0, binary_file.beg);
char* strings = new char[sz];
binary_file.read(strings, sz);
binary_file.close();
//unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
for (unsigned int z = 0 ; z < sz/char_size[colname]; z++) {
char_hash[colname][MurmurHash64A(&strings[z*char_size[colname]], char_size[colname], hash_seed)/2] = z;
};
delete [] strings;
};
};
if(!op_sort.empty()) {
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[colname]*mRecCount];
apply_permutation_char_host(h_columns_char[colname], h_permutation, mRecCount, t, char_size[colname]);
delete [] h_permutation;
thrust::copy(t, t+ char_size[colname]*mRecCount, h_columns_char[colname]);
delete [] t;
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
compress_char(str, colname, partition_recs, new_offset, total_segments-1);
else
compress_char(str, colname, mCount - partition_recs*p, new_offset, total_segments-1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
compress_char(str, colname, mCount, offset, total_segments-1);
};
};
if((check_type == 1 && fact_file_loaded) || (check_type == 1 && check_val == 0)) {
if(!op_sort.empty())
writeHeader(file_name, colname, total_segments-1);
else {
writeHeader(file_name, colname, total_segments);
};
};
total_segments = old_segments;
};
cudaFree(d);
if(!op_sort.empty()) {
total_segments = (old_segments-1)+partition_count;
};
permutation.resize(0);
permutation.shrink_to_fit();
}
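/*
Note on compress(): every call emits one segment per partition (or a single
segment when partition_count == 1), writing files named
<table>.<column>.<segment>. Int and decimal columns are pfor-compressed,
uncompressed floats are appended with a comp_type marker of 3, and char
columns are dictionary-encoded via compress_char. writeHeader() then records
total_count, the segment count and total_max in <table>.<column>.header.
*/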
void CudaSet::writeHeader(string file_name, string colname, unsigned int tot_segs) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&total_max, 4);
binary_file.write((char *)&cnt_counts[ff], 4);
//cout << "HEADER1 " << total_count << " " << tot_segs << " " << total_max << endl;
binary_file.close();
};
void CudaSet::reWriteHeader(string file_name, string colname, unsigned int tot_segs, size_t newRecs, size_t maxRecs1) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&newRecs, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&maxRecs1, 4);
//cout << "HEADER2 " << newRecs << endl;
binary_file.close();
};
void CudaSet::writeSortHeader(string file_name)
{
string str(file_name);
unsigned int idx;
if(!op_sort.empty()) {
str += ".sort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_sort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_sort);
while(!os.empty()) {
if(verbose)
cout << "sorted on " << idx << endl;
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".sort";
remove(str.c_str());
};
str = file_name;
if(!op_presort.empty()) {
str += ".presort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_presort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_presort);
while(!os.empty()) {
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".presort";
remove(str.c_str());
};
}
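/*
Note: the .sort / .presort side files written above contain
[4B key count] followed by [4B name length][name bytes] for each key column,
recording which columns the binary segments were (pre)sorted on; presumably
later loads read them back to detect the existing sort order.
*/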
using namespace mgpu;
void CudaSet::Display(unsigned int limit, bool binary, bool term)
{
#define MAXCOLS 128
#define MAXFIELDSIZE 1400
//-- This should/will be converted to an array of pointers to malloc'ed, appropriately sized structures --
char bigbuf[MAXCOLS * MAXFIELDSIZE];
memset(bigbuf, 0, MAXCOLS * MAXFIELDSIZE);
char *fields[MAXCOLS];
const char *dcolumns[MAXCOLS];
size_t mCount; // num records in play
bool print_all = 0;
string ss, str;
int rows = 0;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
cout << "mRecCount=" << mRecCount << " mcount = " << mCount << " term " << term << " limit=" << limit << " print_all=" << print_all << endl;
unsigned int cc =0;
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
for(unsigned int i = 0; i < columnNames.size(); i++)
{
fields[cc] = &(bigbuf[cc*MAXFIELDSIZE]); // a hack to avoid malloc overheads - refine later
dcolumns[cc++] = columnNames[i].c_str();
if(string_map.find(columnNames[i]) != string_map.end()) {
auto s = string_map[columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
FILE *f;
f = fopen(string_map[columnNames[i]].c_str(), "rb");
file_map[string_map[columnNames[i]]] = f;
len_map[string_map[columnNames[i]]] = len;
};
};
// The goal here is to loop fast and avoid any double handling of outgoing data - pointers are good.
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) { // for each record
for(unsigned int j=0; j < columnNames.size(); j++) { // for each col
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
sprintf(fields[j], "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]])
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
//fprintf(file_pr, "%s", buffer);
//fprintf(file_pr, ".%d", rem);
sprintf(fields[j], "%s.%d", buffer,rem);
/*time_t tt = h_columns_int[columnNames[j]][i];
auto ti = localtime(&tt);
char buffer[10];
strftime(buffer,80,"%Y-%m-%d", ti);
sprintf(fields[j], "%s", buffer);
*/
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char **)dcolumns);
rows++;
};
}
else {
queue<string> op_vx;
for(unsigned int i = 0; i < columnNames.size(); i++)
op_vx.push(columnNames[i]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) { // if host arrays are empty
copyColumns(this, op_vx, curr_seg, cnt);
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
if(sum_printed + mRecCount <= mCount || print_all)
curr_count = mRecCount;
else
curr_count = mCount - sum_printed;
}
else
curr_count = mCount;
sum_printed = sum_printed + mRecCount;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end())
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char**)dcolumns);
rows++;
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
}; // end else
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
void CudaSet::Store(const string file_name, const char* sep, const unsigned int limit, const bool binary, const bool append, const bool term)
{
if (mRecCount == 0 && binary == 1 && !term) { // write tails
for(unsigned int j=0; j < columnNames.size(); j++) {
writeHeader(file_name, columnNames[j], total_segments);
};
return;
};
size_t mCount;
bool print_all = 0;
string str;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
if(binary == 0) {
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
string bf;
unsigned int max_len = 0;
for(unsigned int j=0; j < columnNames.size(); j++) {
if(string_map.find(columnNames[j]) != string_map.end()) {
auto s = string_map[columnNames[j]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if(len > max_len)
max_len = len;
FILE *f;
f = fopen(string_map[columnNames[j]].c_str(), "rb");
file_map[string_map[columnNames[j]]] = f;
len_map[string_map[columnNames[j]]] = len;
};
};
bf.resize(max_len); // resize (not reserve) so that &bf[0] is valid writable storage for the freads below
FILE *file_pr;
if(!term) {
file_pr = fopen(file_name.c_str(), "w");
if (!file_pr)
cout << "Could not open file " << file_name << endl;
}
else
file_pr = stdout;
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1 ) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
}
};
if (i != mCount -1 )
fputs("\n",file_pr);
};
if(!term)
fclose(file_pr);
}
else {
queue<string> op_vx;
string ss;
for(unsigned int j=0; j < columnNames.size(); j++)
op_vx.push(columnNames[j]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
mRecCount = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
if(curr_seg == 0) {
if(limit != 0 && limit < mRecCount) {
mCount = limit;
print_all = 0;
}
else {
mCount = mRecCount;
print_all = 1;
};
};
// if host arrays are empty
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
//cout << "start " << sum_printed << " " << mRecCount << " " << mCount << endl;
if(sum_printed + mRecCount <= mCount || print_all) {
curr_count = mRecCount;
}
else {
curr_count = mCount - sum_printed;
};
}
else {
curr_count = mCount;
};
sum_printed = sum_printed + mRecCount;
//cout << "sum printed " << sum_printed << " " << curr_count << " " << curr_seg << endl;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
};
};
if (i != mCount -1 && (curr_seg != segCount || i < curr_count))
fputs("\n",file_pr);
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
if(!term) {
fclose(file_pr);
};
};
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
else {
//lets update the data dictionary
for(unsigned int j=0; j < columnNames.size(); j++) {
data_dict[file_name][columnNames[j]].col_type = type[columnNames[j]];
if(type[columnNames[j]] != 2) {
if(decimal[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = decimal_zeroes[columnNames[j]];
else
if (ts_cols[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = UINT_MAX;
else
data_dict[file_name][columnNames[j]].col_length = 0;
}
else
data_dict[file_name][columnNames[j]].col_length = char_size[columnNames[j]];
};
save_dict = 1;
if(text_source) { //writing a binary file using a text file as a source
compress(file_name, 0, 1, 0, mCount, append);
for(unsigned int i = 0; i< columnNames.size(); i++)
if(type[columnNames[i]] == 2)
deAllocColumnOnDevice(columnNames[i]);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
size_t offset = 0;
if(!not_compressed) { // records are compressed, for example after filter op.
//decompress to host
queue<string> op_vx;
for(unsigned int i = 0; i< columnNames.size(); i++) {
op_vx.push(columnNames[i]);
};
allocColumns(this, op_vx);
size_t oldCnt = mRecCount;
mRecCount = 0;
resize(oldCnt);
mRecCount = oldCnt;
for(unsigned int i = 0; i < segCount; i++) {
size_t cnt = 0;
copyColumns(this, op_vx, i, cnt);
CopyToHost(0, mRecCount);
offset = offset + mRecCount;
compress(file_name, 0, 0, i - (segCount-1), mRecCount, append);
};
}
else {
// now we have decompressed records on the host
//call setSegments and compress columns in every segment
segCount = (mRecCount/process_count + 1);
offset = 0;
for(unsigned int z = 0; z < segCount; z++) {
if(z < segCount-1) {
if(mRecCount < process_count) {
mCount = mRecCount;
}
else {
mCount = process_count;
}
}
else {
mCount = mRecCount - (segCount-1)*process_count;
};
compress(file_name, offset, 0, z - (segCount-1), mCount, append);
offset = offset + mCount;
};
};
};
};
}
void CudaSet::compress_char(const string file_name, const string colname, const size_t mCount, const size_t offset, const unsigned int segment)
{
unsigned int len = char_size[colname];
string h_name, i_name, file_no_seg = file_name.substr(0, file_name.find_last_of("."));
i_name = file_no_seg + "." + to_string(segment) + ".idx";
h_name = file_no_seg + "." + to_string(segment) + ".hash";
fstream b_file_str, loc_hashes;
fstream binary_file_h(h_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file_h.write((char *)&mCount, 4);
if(segment == 0) {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::trunc);
}
else {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::app);
};
if(h_columns_int.find(colname) == h_columns_int.end()) {
h_columns_int[colname] = thrust::host_vector<int_type >(mCount);
}
else {
if(h_columns_int[colname].size() < mCount)
h_columns_int[colname].resize(mCount);
};
if(d_columns_int.find(colname) == d_columns_int.end()) {
d_columns_int[colname] = thrust::device_vector<int_type >(mCount);
}
else {
if(d_columns_int[colname].size() < mCount)
d_columns_int[colname].resize(mCount);
};
size_t cnt;
long long int* hash_array = new long long int[mCount];
unordered_map<unsigned long long int, size_t>::iterator iter;
vector<int_type> test(mCount);
for (unsigned int i = 0 ; i < mCount; i++) {
hash_array[i] = MurmurHash64A(h_columns_char[colname] + (i+offset)*len, len, hash_seed)/2;
iter = char_hash[colname].find(hash_array[i]);
if(iter == char_hash[colname].end()) {
cnt = char_hash[colname].size();
char_hash[colname][hash_array[i]] = cnt;
b_file_str.write((char *)h_columns_char[colname] + (i+offset)*len, len);
//h_columns_int[colname][i] = cnt;
test[i] = cnt;
}
else {
//h_columns_int[colname][i] = iter->second;
test[i] = iter->second;
};
};
memcpy(h_columns_int[colname].data(), test.data(), mCount*8);
binary_file_h.write((char *)hash_array, 8*mCount);
delete [] hash_array;
thrust::device_vector<int_type> d_col(mCount);
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mCount, d_col.begin());
pfor_compress(thrust::raw_pointer_cast(d_col.data()), mCount*int_size, i_name, h_columns_int[colname], 0);
binary_file_h.close();
b_file_str.close();
};
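/*
Note on compress_char: strings are dictionary-encoded. Each distinct string,
identified by a 64-bit MurmurHash of its fixed-width bytes, is appended once
to the shared <table>.<column> string file; its ordinal is stored per row and
pfor-compressed into <table>.<column>.<segment>.idx, while
<table>.<column>.<segment>.hash keeps [4B row count] followed by the per-row
8-byte hashes. On append, compress() above repopulates char_hash by re-reading
and re-hashing the existing string file.
*/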
bool first_time = 1;
size_t rec_sz = 0;
size_t process_piece;
bool CudaSet::LoadBigFile(FILE* file_p, thrust::device_vector<char>& d_readbuff, thrust::device_vector<char*>& dest,
thrust::device_vector<unsigned int>& ind, thrust::device_vector<unsigned int>& dest_len)
{
const char* sep = separator.c_str();
unsigned int maxx = cols.rbegin()->first;
map<unsigned int, string>::iterator it;
bool done = 0;
std::clock_t start1 = std::clock();
vector<int> types;
vector<int> cl;
types.push_back(0);
for(int i = 0; i < maxx; i++) {
auto iter = cols.find(i+1);
if(iter != cols.end()) {
types.push_back(type[iter->second]);
cl.push_back(iter->first-1);
}
else
types.push_back(0);
};
if(first_time) {
if(process_count*4 > getFreeMem()) {
process_piece = getFreeMem()/4;
}
else
process_piece = process_count;
readbuff = new char[process_piece+1];
d_readbuff.resize(process_piece+1);
cout << "set a piece to " << process_piece << " " << getFreeMem() << endl;
};
thrust::device_vector<unsigned int> ind_cnt(1);
thrust::device_vector<char> sepp(1);
sepp[0] = *sep;
long long int total_processed = 0;
size_t recs_processed = 0;
bool finished = 0;
thrust::device_vector<long long int> dev_pos;
long long int offset;
unsigned int cnt = 1;
const unsigned int max_len = 23;
while(!done) {
auto rb = fread(readbuff, 1, process_piece, file_p);
if(rb < process_piece) {
done = 1;
finished = 1;
fclose(file_p);
};
if(total_processed >= process_count)
done = 1;
thrust::fill(d_readbuff.begin(), d_readbuff.end(),0);
thrust::copy(readbuff, readbuff+rb, d_readbuff.begin());
auto curr_cnt = thrust::count(d_readbuff.begin(), d_readbuff.begin() + rb, '\n') - 1;
if(recs_processed == 0 && first_time) {
rec_sz = curr_cnt;
if(finished)
rec_sz++;
total_max = curr_cnt;
};
if(first_time) {
for(unsigned int i=0; i < columnNames.size(); i++) {
auto colname = columnNames[i];
if (type[colname] == 0) {
d_columns_int[colname].resize(d_columns_int[colname].size() + rec_sz);
h_columns_int[colname].resize(h_columns_int[colname].size() + rec_sz);
}
else
if (type[colname] == 1) {
d_columns_float[colname].resize(d_columns_float[colname].size() + rec_sz);
h_columns_float[colname].resize(h_columns_float[colname].size() + rec_sz);
}
else {
char* c = new char[cnt*rec_sz*char_size[columnNames[i]]];
if(recs_processed > 0) {
memcpy(c, h_columns_char[columnNames[i]], recs_processed*char_size[columnNames[i]]);
delete [] h_columns_char[columnNames[i]];
};
h_columns_char[columnNames[i]] = c;
if(recs_processed == 0) {
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
cudaMemset(temp,0,char_size[columnNames[i]]*rec_sz);
d_columns_char[columnNames[i]] = (char*)temp;
};
};
if(recs_processed == 0) {
ind[i] = cl[i];
void* temp;
if(type[columnNames[i]] != 2) {
if(!ts_cols[columnNames[i]]) {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, max_len*rec_sz));
dest_len[i] = max_len;
}
else {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, 23*rec_sz));
dest_len[i] = 23;
}
}
else {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
dest_len[i] = char_size[columnNames[i]];
};
dest[i] = (char*)temp;
};
};
};
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 2) {
cudaMemset(dest[i],0,max_len*rec_sz);
}
else {
cudaMemset(dest[i],0,char_size[columnNames[i]]*rec_sz);
};
};
if(dev_pos.size() < curr_cnt+1)
dev_pos.resize(curr_cnt+1); //avoiding the unnecessary allocs
dev_pos[0] = -1;
thrust::copy_if(thrust::make_counting_iterator((unsigned long long int)0), thrust::make_counting_iterator((unsigned long long int)rb-1),
d_readbuff.begin(), dev_pos.begin()+1, _1 == '\n');
if(!finished) {
if(curr_cnt < rec_sz) {
offset = (dev_pos[curr_cnt] - rb)+1;
//cout << "PATH 1 " << dev_pos[curr_cnt] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = curr_cnt;
}
else {
offset = (dev_pos[rec_sz] - rb)+1;
//cout << "PATH 2 " << dev_pos[rec_sz] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = rec_sz;
};
}
else {
mRecCount = curr_cnt + 1;
};
thrust::counting_iterator<unsigned int> begin(0);
ind_cnt[0] = mColumnCount;
parse_functor ff((const char*)thrust::raw_pointer_cast(d_readbuff.data()),(char**)thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(ind.data()),
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(sepp.data()), thrust::raw_pointer_cast(dev_pos.data()), thrust::raw_pointer_cast(dest_len.data()));
thrust::for_each(begin, begin + mRecCount, ff);
ind_cnt[0] = max_len;
for(int i =0; i < mColumnCount; i++) {
if(type[columnNames[i]] == 0) { //int
thrust::device_ptr<char> p1((char*)dest[i]);
if(p1[4] == '-') { //date
if(!ts_cols[columnNames[i]]) {
gpu_date date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
else {
gpu_tdate date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
}
else { //int
if(decimal[columnNames[i]]) {
thrust::device_vector<unsigned int> scale(1);
scale[0] = decimal_zeroes[columnNames[i]];
gpu_atold atold((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(scale.data()));
thrust::for_each(begin, begin + mRecCount, atold);
}
else {
gpu_atoll atoll_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atoll_ff);
};
};
thrust::copy(d_columns_int[columnNames[i]].begin() + recs_processed, d_columns_int[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_int[columnNames[i]].begin() + recs_processed);
}
else
if(type[columnNames[i]] == 1) {
gpu_atof atof_ff((const char*)dest[i],(double*)thrust::raw_pointer_cast(d_columns_float[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atof_ff);
thrust::copy(d_columns_float[columnNames[i]].begin() + recs_processed, d_columns_float[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_float[columnNames[i]].begin() + recs_processed);
}
else {//char is already done
thrust::device_ptr<char> p1((char*)dest[i]);
cudaMemcpy( h_columns_char[columnNames[i]] + char_size[columnNames[i]]*recs_processed, (void *)dest[i] , char_size[columnNames[i]]*mRecCount, cudaMemcpyDeviceToHost);
};
};
recs_processed = recs_processed + mRecCount;
cnt++;
};
if(finished) {
for(int i =0; i < mColumnCount; i++) {
if(dest[i]) {
cudaFree(dest[i]);
dest[i] = nullptr;
};
};
delete [] readbuff;
};
cout << "processed recs " << recs_processed << " " << getFreeMem() << endl;
first_time = 0;
mRecCount = recs_processed;
return finished;
};
void CudaSet::free() {
for(unsigned int i = 0; i < columnNames.size(); i++ ) {
if(type[columnNames[i]] == 0 && h_columns_int[columnNames[i]].size() ) {
h_columns_int[columnNames[i]].resize(0);
h_columns_int[columnNames[i]].shrink_to_fit();
}
else {
h_columns_float[columnNames[i]].resize(0);
h_columns_float[columnNames[i]].shrink_to_fit();
};
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
deAllocOnDevice();
};
void alloc_pool(unsigned int maxRecs) {
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, 8*maxRecs));
alloced_mem.push_back(temp);
};
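// Element-wise boolean combinators used by the filter evaluator: the result is written
// back into column1 and the second operand's device buffer is released.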
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
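// Scalar-vs-scalar comparisons: the predicate is evaluated once on the host and the
// result is broadcast into a device array of mRecCount booleans.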
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if(d>s)
res = 1;
else
res = 0;
else
if (op_type == 1) // <
if(d<s)
res = 1;
else
res = 0;
else
if (op_type == 6) // >=
if(d>=s)
res = 1;
else
res = 0;
else
if (op_type == 5) // <=
if(d<=s)
res = 1;
else
res = 0;
else
if (op_type == 4)// =
if(d==s)
res = 1;
else
res = 0;
else // !=
if(d!=s)
res = 1;
else
res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::fill(p, p+mRecCount, res);
return thrust::raw_pointer_cast(p);
};
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if ((d-s) > EPSILON)
res = 1;
else
res = 0;
else
if (op_type == 1) // <
if ((s-d) > EPSILON)
res = 1;
else
res = 0;
else
if (op_type == 6) // >=
if (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
else
if (op_type == 5) // <=
if (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
else
if (op_type == 4)// =
if (((d-s) < EPSILON) && ((d-s) > -EPSILON))
res = 1;
else
res = 0;
else // !=
if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::fill(p, p+mRecCount, res);
return thrust::raw_pointer_cast(p);
}
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<float_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_equal_to());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_not_equal_to());
return thrust::raw_pointer_cast(res);
}
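// Column-vs-constant comparison for integer/decimal columns. p1 scales the column values
// and p2 scales the constant by powers of ten so fixed-point operands with different
// decimal scales are compared on a common scale.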
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
if(p2)
d = d*(int_type)pow(10, p2);
if (op_type == 2) // >
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
if (op_type == 1) // <
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
if (op_type == 6) // >=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
if (op_type == 5) // <=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
if (op_type == 4)// =
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
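// Column-vs-column comparison for integer/decimal columns; p1 and p2 rescale the two
// sides by powers of ten before the comparison operator is applied.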
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
if(!p1 && !p2) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
}
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
else
if (op_type == 1) // <
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
else
if (op_type == 6) // >=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater_equal<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
else
if (op_type == 5) // <=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less_equal<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
else
if (op_type == 4)// =
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::equal_to<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
else // !=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::not_equal_to<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr(column2);
thrust::device_ptr<float_type> dev_ptr2 = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr2, long_to_float_type());
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
thrust::device_free(dev_ptr2);
return thrust::raw_pointer_cast(temp);
}
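// Arithmetic between an integer column and a float column: the integer column is first
// converted to floats in a scratch buffer taken from the allocation pool; 'reverse'
// swaps the operand order for non-commutative operators.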
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::transform(dev_ptr, dev_ptr + mRecCount, temp, long_to_float_type()); // convert the integer column to floats in the scratch buffer
thrust::device_ptr<float_type> dev_ptr1(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
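// Arithmetic between an integer/decimal column and an integer constant. p1 rescales the
// column and p2 rescales the constant by powers of ten before the operation is applied.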
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
//cout << "OP " << d << " " << op_type << " " << p1 << " " << p2 << endl;
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
int_type d1 = d; // keep the unscaled constant for the multiplication case
if(p2)
d = d*(int_type)pow(10, p2); // align the constant's decimal scale with the column's
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d1), temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<int_type>()); // d is already scaled by pow(10, p2) above
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<int_type>()); // d is already scaled by pow(10, p2) above
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<int_type>()); // d is already scaled by pow(10, p2) above
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::divides<int_type>());
}
}
else {
if (op_type.compare("MUL") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
};
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
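// Arithmetic between two integer/decimal columns; p1 and p2 bring both sides to a common
// decimal scale, and 'reverse' swaps the operand order.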
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
//cout << "OP " << op_type << " " << p1 << " " << p2 << " " << reverse << endl;
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else
if(p1 && p2) {
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::plus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::divides<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
}
}
else {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::divides<int_type>());
}
}
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
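// Arithmetic between an integer column and a float constant: the constant fills the
// result buffer and the column is converted to floats before the operation is applied.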
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::device_ptr<float_type> dev_ptr1 = thrust::device_malloc<float_type>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr1, long_to_float_type());
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
thrust::device_free(dev_ptr1);
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<float_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return (float_type*)thrust::raw_pointer_cast(temp);
}
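// Loads one segment of an index file: rebuilds the host-side dictionary
// (idx_dictionary_int) and uploads the packed index values to the GPU (idx_vals).
// In interactive mode the raw file is cached in pinned host memory and reused.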
char CudaSet::loadIndex(const string index_name, const unsigned int segment)
{
FILE* f;
unsigned int bits_encoded, fit_count, vals_count, sz, real_count;
void* d_str;
string f1 = index_name + "." + to_string(segment);
char res;
//interactive = 0;
if(interactive) {
if(index_buffers.find(f1) == index_buffers.end()) {
f = fopen (f1.c_str(), "rb" );
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
char* buff;
cudaHostAlloc(&buff, fileSize, cudaHostAllocDefault);
fseek(f, 0, SEEK_SET);
fread(buff, fileSize, 1, f);
fclose(f);
index_buffers[f1] = buff;
};
sz = ((unsigned int*)index_buffers[f1])[0];
idx_dictionary_int[index_name].clear();
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][((int_type*)(index_buffers[f1]+4+8*i))[0]] = i;
};
vals_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[2];
real_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[3];
mRecCount = real_count;
res = index_buffers[f1][4 + 8*sz + 16 + vals_count*8]; // trailing byte after the index values, same byte the non-cached path returns
if(idx_vals.count(index_name) == 0) {
cudaMalloc((void **) &d_str, (vals_count+2)*int_size);
cudaMemcpy( d_str, (void *) &((index_buffers[f1]+4 +8*sz)[0]), (vals_count+2)*int_size, cudaMemcpyHostToDevice);
idx_vals[index_name] = (unsigned long long int*)d_str;
};
}
else {
f = fopen (f1.c_str(), "rb" );
fread(&sz, 4, 1, f);
int_type* d_array = new int_type[sz];
idx_dictionary_int[index_name].clear();
fread((void*)d_array, sz*int_size, 1, f);
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][d_array[i]] = i;
};
delete [] d_array;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
mRecCount = real_count;
unsigned long long int* int_array = new unsigned long long int[vals_count+2];
fseek ( f , -16 , SEEK_CUR );
fread((void*)int_array, 1, vals_count*8 + 16, f);
fread(&res, 1, 1, f);
fclose(f);
void* d_str;
cudaMalloc((void **) &d_str, (vals_count+2)*int_size);
cudaMemcpy( d_str, (void *) int_array, (vals_count+2)*int_size, cudaMemcpyHostToDevice);
if(idx_vals.count(index_name))
cudaFree(idx_vals[index_name]);
idx_vals[index_name] = (unsigned long long int*)d_str;
}
return res;
}
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name) // compressed data for DIM tables
{
mColumnCount = (unsigned int)nameRef.size();
FILE* f;
string f1;
unsigned int cnt;
char buffer[4000];
string str;
not_compressed = 0;
mRecCount = Recs;
hostRecCount = Recs;
totalRecs = Recs;
load_file_name = file_name;
f1 = file_name + ".sort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
sorted_fields.push(str);
if(verbose)
cout << "segment sorted on " << str << endl;
};
fclose(f);
};
f1 = file_name + ".presort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
presorted_fields.push(str);
if(verbose)
cout << "presorted on " << str << endl;
};
fclose(f);
};
tmp_table = 0;
filtered = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
//f1 = file_name + "." + nameRef.front() + ".0";
//f = fopen (f1.c_str() , "rb" );
//fread((char *)&bytes, 4, 1, f); //need to read metadata such as type and length
//fclose(f);
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if (((typeRef.front()).compare("decimal") == 0) || ((typeRef.front()).compare("int") == 0)) {
f1 = file_name + "." + nameRef.front() + ".0";
f = fopen (f1.c_str() , "rb" );
if(!f) {
cout << "Couldn't find field " << nameRef.front() << endl;
exit(0);
};
for(unsigned int j = 0; j < 6; j++)
fread((char *)&cnt, 4, 1, f);
fclose(f);
compTypes[nameRef.front()] = cnt;
};
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else
if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type >();
}
else
if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
string_map[nameRef.front()] = file_name + "." + nameRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
{
mColumnCount = (unsigned int)nameRef.size();
tmp_table = 0;
filtered = 0;
mRecCount = 0;
hostRecCount = Recs;
segCount = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else
if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type>();
}
else
if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(const size_t RecordCount, const unsigned int ColumnCount)
{
mRecCount = RecordCount;
hostRecCount = RecordCount;
mColumnCount = ColumnCount;
filtered = 0;
};
void CudaSet::initialize(queue<string> op_sel, const queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = (unsigned int)op_sel.size();
segCount = 1;
not_compressed = 1;
filtered = 0;
col_aliases = op_sel_as;
unsigned int i = 0;
CudaSet *a;
while(!op_sel.empty()) {
for(auto it = varNames.begin(); it != varNames.end(); it++) {
a = it->second;
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end())
break;
};
type[op_sel.front()] = a->type[op_sel.front()];
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
columnNames.push_back(op_sel.front());
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
//h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type>();
}
else
if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
//h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type>();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
};
i++;
op_sel.pop();
};
}
void CudaSet::initialize(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = 0;
queue<string> q_cnt(op_sel);
unsigned int i = 0;
set<string> field_names;
while(!q_cnt.empty()) {
if( std::find(a->columnNames.begin(), a->columnNames.end(), q_cnt.front()) != a->columnNames.end() ||
std::find(b->columnNames.begin(), b->columnNames.end(), q_cnt.front()) != b->columnNames.end()) {
field_names.insert(q_cnt.front());
};
q_cnt.pop();
}
mColumnCount = (unsigned int)field_names.size();
maxRecs = b->maxRecs;
segCount = 1;
filtered = 0;
not_compressed = 1;
col_aliases = op_sel_as;
i = 0;
while(!op_sel.empty()) {
if(std::find(columnNames.begin(), columnNames.end(), op_sel.front()) == columnNames.end()) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end()) {
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
columnNames.push_back(op_sel.front());
type[op_sel.front()] = a->type[op_sel.front()];
ts_cols[op_sel.front()] = a->ts_cols[op_sel.front()];
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(a->string_map.find(op_sel.front()) != a->string_map.end()) {
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
}
else
if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
i++;
}
else
if(std::find(b->columnNames.begin(), b->columnNames.end(), op_sel.front()) != b->columnNames.end()) {
columnNames.push_back(op_sel.front());
cols[i] = op_sel.front();
decimal[op_sel.front()] = b->decimal[op_sel.front()];
type[op_sel.front()] = b->type[op_sel.front()];
ts_cols[op_sel.front()] = b->ts_cols[op_sel.front()];
if (b->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(b->string_map.find(op_sel.front()) != b->string_map.end()) {
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
decimal[op_sel.front()] = b->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = b->decimal_zeroes[op_sel.front()];
}
else
if (b->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = b->char_size[op_sel.front()];
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
i++;
}
}
op_sel.pop();
};
};
int_type reverse_op(int_type op_type)
{
if (op_type == 2) // >
return 1;
else
if (op_type == 1) // <
return 2;
else
if (op_type == 6) // >=
return 5;
else
if (op_type == 5) // <=
return 6;
else
return op_type;
}
size_t getFreeMem()
{
size_t available, total;
cudaMemGetInfo(&available, &total);
return available;
} ;
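// Ensures device-side storage for the requested fields: for filtered sets a single
// scratch buffer sized for the source table is (re)allocated, otherwise each missing
// column is allocated on the device.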
void allocColumns(CudaSet* a, queue<string> fields)
{
if(a->filtered) {
CudaSet* t = varNames[a->source_name]; // a is filtered here, so size the scratch buffer from its source set
if(int_size*t->maxRecs > alloced_sz) {
if(alloced_sz) {
cudaFree(alloced_tmp);
};
cudaMalloc((void **) &alloced_tmp, int_size*t->maxRecs);
alloced_sz = int_size*t->maxRecs;
}
}
else {
while(!fields.empty()) {
if(var_exists(a, fields.front()) && !a->onDevice(fields.front())) {
a->allocColumnOnDevice(fields.front(), a->maxRecs);
}
fields.pop();
};
};
}
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, size_t& count)
{
if(!a->onDevice(field)) {
a->allocColumnOnDevice(field, a->maxRecs);
};
if(a->prm_index == 'R') {
mygather(field, a, t, count, a->mRecCount);
}
else {
mycopy(field, a, t, count, t->mRecCount);
a->mRecCount = t->mRecCount;
};
}
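// Post-processing for bit-packed copies: columns that were transferred as 8/16/32-bit
// values are widened back to 64 bits and the per-segment base value (cpy_init_val) is
// added back; float columns are then converted from the integer representation.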
void copyFinalize(CudaSet* a, queue<string> fields, bool ts)
{
set<string> uniques;
if(scratch.size() < a->mRecCount*8)
scratch.resize(a->mRecCount*8);
thrust::device_ptr<int_type> tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front()) && cpy_bits.find(fields.front()) != cpy_bits.end() && (!a->ts_cols[fields.front()] || ts)) {
if(cpy_bits[fields.front()] == 8) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
}
else {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
};
}
else
if(cpy_bits[fields.front()] == 16) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
}
else {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
};
}
else
if(cpy_bits[fields.front()] == 32) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
}
else {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
};
}
else {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
}
else {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
};
};
thrust::constant_iterator<int_type> iter(cpy_init_val[fields.front()]);
if(a->type[fields.front()] != 1) {
thrust::transform(tmp, tmp + a->mRecCount, iter, a->d_columns_int[fields.front()].begin(), thrust::plus<int_type>());
}
else {
thrust::device_ptr<int_type> dest((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(tmp, tmp + a->mRecCount, iter, dest, thrust::plus<int_type>());
thrust::transform(dest, dest+a->mRecCount, a->d_columns_float[fields.front()].begin(), long_to_float());
};
};
uniques.insert(fields.front());
fields.pop();
};
}
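// Copies the requested fields of one segment to the device: filtered sets are first
// run through filter_op and gathered via their permutation, unfiltered sets are copied
// directly with CopyColumnToGpu.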
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt)
{
//std::clock_t start1 = std::clock();
set<string> uniques;
if(a->filtered) { //filter the segment
if(flt) {
filter_op(a->fil_s, a->fil_f, segment);
};
if(rsz && a->mRecCount) {
queue<string> fields1(fields);
while(!fields1.empty()) {
a->resizeDeviceColumn(a->devRecCount + a->mRecCount, fields1.front());
fields1.pop();
};
a->devRecCount = a->devRecCount + a->mRecCount;
};
};
cpy_bits.clear();
cpy_init_val.clear();
auto f(fields);
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front())) {
if(a->filtered) {
if(a->mRecCount) {
CudaSet *t = varNames[a->source_name];
alloced_switch = 1;
t->CopyColumnToGpu(fields.front(), segment);
gatherColumns(a, t, fields.front(), segment, count);
alloced_switch = 0;
};
}
else {
if(a->mRecCount) {
a->CopyColumnToGpu(fields.front(), segment, count);
};
};
uniques.insert(fields.front());
};
fields.pop();
};
//std::cout<< "copy time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
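// Gathers the rows selected by a->prm_d from the staging buffer (alloced_tmp) into the
// destination column, honouring the packed width recorded in cpy_bits.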
void mygather(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1 ) {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
};
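// Copies a whole segment from the staging buffer (alloced_tmp) into the destination
// column starting at 'offset', honouring the packed width recorded in cpy_bits.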
void mycopy(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1) {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_int[colname].begin() + offset);
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_int[colname].begin() + offset);
};
}
else {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_float[colname].begin() + offset);
};
};
};
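// Loads the named columns (plus the join key f2) of 'right' for the given segment range
// onto the device and returns the total number of records copied.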
size_t load_queue(queue<string> c1, CudaSet* right, string f2, size_t &rcount,
unsigned int start_segment, unsigned int end_segment, bool rsz, bool flt)
{
queue<string> cc;
while(!c1.empty()) {
if(std::find(right->columnNames.begin(), right->columnNames.end(), c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() ) {
cc.push(c1.front());
};
};
c1.pop();
};
if(std::find(right->columnNames.begin(), right->columnNames.end(), f2) != right->columnNames.end()) {
cc.push(f2);
};
if(right->filtered) {
allocColumns(right, cc);
};
rcount = right->maxRecs;
queue<string> ct(cc);
while(!ct.empty()) {
if(right->filtered && rsz) {
right->mRecCount = 0;
}
else {
right->allocColumnOnDevice(ct.front(), rcount*right->segCount);
};
ct.pop();
};
size_t cnt_r = 0;
right->devRecCount = 0;
for(unsigned int i = start_segment; i < end_segment; i++) {
if(!right->filtered)
copyColumns(right, cc, i, cnt_r, rsz, 0);
else
copyColumns(right, cc, i, cnt_r, rsz, flt);
cnt_r = cnt_r + right->mRecCount;
};
right->mRecCount = cnt_r;
return cnt_r;
}
size_t max_char(CudaSet* a)
{
size_t max_char1 = 8;
for(unsigned int i = 0; i < a->columnNames.size(); i++) {
if(a->type[a->columnNames[i]] == 2) {
if (a->char_size[a->columnNames[i]] > max_char1)
max_char1 = a->char_size[a->columnNames[i]];
}
else
if(a->type[a->columnNames[i]] == 0 && a->string_map.find(a->columnNames[i]) != a->string_map.end()) {
auto s = a->string_map[a->columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if (len > max_char1)
max_char1 = len;
};
};
return max_char1;
};
size_t max_char(CudaSet* a, queue<string> field_names)
{
size_t max_char = 8;
while (!field_names.empty()) {
if (a->type[field_names.front()] == 2) {
if (a->char_size[field_names.front()] > max_char)
max_char = a->char_size[field_names.front()];
};
field_names.pop();
};
return max_char;
};
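// Splits a table into segments when the listed columns would not fit comfortably in
// free device memory, and derives the per-segment record count.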
void setSegments(CudaSet* a, queue<string> cols)
{
size_t mem_available = getFreeMem();
size_t tot_sz = 0;
while(!cols.empty()) {
if(a->type[cols.front()] != 2)
tot_sz = tot_sz + int_size;
else
tot_sz = tot_sz + a->char_size[cols.front()];
cols.pop();
};
if(a->mRecCount*tot_sz > mem_available/3) { //default is 3
a->segCount = (a->mRecCount*tot_sz)/(mem_available/5) + 1;
a->maxRecs = (a->mRecCount/a->segCount)+1;
};
};
void update_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, string SortType, char* tmp, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)tmp, len);
if (SortType.compare("DESC") == 0 )
str_sort_host(tmp, RecCount, permutation, 1, len);
else
str_sort_host(tmp, RecCount, permutation, 0, len);
}
void apply_permutation_char(char* key, unsigned int* permutation, size_t RecCount, char* tmp, unsigned int len)
{
// copy keys to temporary vector
cudaMemcpy( (void*)tmp, (void*) key, RecCount*len, cudaMemcpyDeviceToDevice);
// permute the keys
str_gather((void*)permutation, RecCount, (void*)tmp, (void*)key, len);
}
void apply_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, char* res, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)res, len);
}
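// Applies the filter expression stored in set 's' to segment 'segment' of its source
// set 'f': a zone-map check decides whether the segment is skipped, taken whole ('A'),
// or evaluated row by row ('R') into the permutation prm_d.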
void filter_op(const char *s, const char *f, unsigned int segment)
{
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
//std::clock_t start1 = std::clock();
if(a->mRecCount == 0 && !a->filtered) {
b = new CudaSet(0,1);
}
else {
if(verbose)
cout << "FILTER " << s << " " << f << " " << getFreeMem() << '\xd';
b = varNames[s];
b->name = s;
b->string_map = a->string_map;
size_t cnt = 0;
b->sorted_fields = a->sorted_fields;
b->ts_cols = a->ts_cols;
allocColumns(a, b->fil_value);
if (b->prm_d.size() == 0) {
b->prm_d.resize(a->maxRecs);
};
cout << endl << "MAP CHECK start " << segment << endl;
char map_check = zone_map_check(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
cout << endl << "MAP CHECK segment " << segment << " " << map_check << endl;
if(map_check == 'R') {
auto old_ph = phase_copy;
phase_copy = 0;
copyColumns(a, b->fil_value, segment, cnt);
phase_copy = old_ph;
bool* res = filter(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
thrust::device_ptr<bool> bp((bool*)res);
b->prm_index = 'R';
b->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 1);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, b->prm_d.begin(), thrust::identity<bool>());
cudaFree(res);
}
else {
b->prm_index = map_check;
if(map_check == 'A')
b->mRecCount = a->mRecCount;
else
b->mRecCount = 0;
};
if(segment == a->segCount-1)
a->deAllocOnDevice();
}
if(verbose)
cout << endl << "filter result " << b->mRecCount << endl;
}
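// After filter_op, b->prm_index acts as a small per-segment flag: 'R' means the
// surviving row numbers were written to b->prm_d and must be gathered, 'A' means
// the whole segment passed the zone map, and anything else (e.g. 'N') means the
// segment can be skipped. A hedged consumer sketch, in outline only:
//   switch (b->prm_index) {
//       case 'A': /* read the segment as-is */ break;
//       case 'R': /* gather the rows listed in b->prm_d */ break;
//       default:  /* nothing qualified, skip the segment */ break;
//   }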
size_t load_right(CudaSet* right, string f2, queue<string> op_g, queue<string> op_alt, size_t& rcount, unsigned int start_seg, unsigned int end_seg) {
size_t cnt_r = 0;
//if join is on strings then add integer columns to left and right tables and modify colInd1 and colInd2
// need to allocate all right columns
if(right->not_compressed) {
queue<string> op_alt1;
op_alt1.push(f2);
cnt_r = load_queue(op_alt1, right, "", rcount, start_seg, end_seg, 1, 1);
queue<string> op_alt2;
while(!op_alt.empty()) {
if(f2.compare(op_alt.front())) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), op_alt.front()) != right->columnNames.end()) {
op_alt2.push(op_alt.front());
};
};
op_alt.pop();
};
if(!op_alt2.empty())
cnt_r = load_queue(op_alt2, right, "", rcount, start_seg, end_seg, 0, 0);
}
else {
cnt_r = load_queue(op_alt, right, f2, rcount, start_seg, end_seg, 1, 1);
};
return cnt_r;
};
void insert_records(const char* f, const char* s) {
char buf[4096];
size_t size, maxRecs, cnt = 0;
string str_s, str_d;
if(varNames.find(s) == varNames.end()) {
process_error(3, "couldn't find " + string(s) );
};
CudaSet *a;
a = varNames.find(s)->second;
a->name = s;
if(varNames.find(f) == varNames.end()) {
process_error(3, "couldn't find " + string(f) );
};
CudaSet *b;
b = varNames.find(f)->second;
b->name = f;
// if both source and destination are on disk
cout << "SOURCES " << a->source << ":" << b->source << endl;
if(a->source && b->source) {
for(unsigned int i = 0; i < a->segCount; i++) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
if(a->type[a->columnNames[z]] != 2) {
str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str_d = b->load_file_name + "." + a->columnNames[z] + "." + to_string(b->segCount + i);
cout << str_s << " " << str_d << endl;
FILE* source = fopen(str_s.c_str(), "rb");
FILE* dest = fopen(str_d.c_str(), "wb");
while ((size = fread(buf, 1, sizeof(buf), source)) > 0) { // BUFSIZ may exceed the local buffer; copy in sizeof(buf) chunks
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
}
else { //merge strings
//read b's strings
str_s = b->load_file_name + "." + b->columnNames[z];
FILE* dest = fopen(str_s.c_str(), "rb");
auto len = b->char_size[b->columnNames[z]];
map<string, unsigned long long int> map_d;
buf[len] = 0;
unsigned long long cnt = 0;
while (fread(buf, len, 1, dest)) {
map_d[buf] = cnt;
cnt++;
};
fclose(dest);
unsigned long long int cct = cnt;
str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i) + ".hash";
str_d = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".hash";
FILE* source = fopen(str_s.c_str(), "rb");
dest = fopen(str_d.c_str(), "wb");
while ((size = fread(buf, 1, sizeof(buf), source)) > 0) {
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
str_s = a->load_file_name + "." + a->columnNames[z];
source = fopen(str_s.c_str(), "rb");
map<unsigned long long int, string> map_s;
buf[len] = 0;
cnt = 0;
while (fread(buf, len, 1, source)) {
map_s[cnt] = buf;
cnt++;
};
fclose(source);
queue<string> op_vx;
op_vx.push(a->columnNames[z]);
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->CopyColumnToGpu(a->columnNames[z], z, 0);
a->CopyColumnToHost(a->columnNames[z]);
str_d = b->load_file_name + "." + b->columnNames[z];
fstream f_file;
f_file.open(str_d.c_str(), ios::out|ios::app|ios::binary);
for(auto j = 0; j < a->mRecCount; j++) {
auto ss = map_s[a->h_columns_int[a->columnNames[z]][j]];
if(map_d.find(ss) == map_d.end()) { //add
f_file.write((char *)ss.c_str(), len);
a->h_columns_int[a->columnNames[z]][j] = cct;
cct++;
}
else {
a->h_columns_int[a->columnNames[z]][j] = map_d[ss];
};
};
f_file.close();
thrust::device_vector<int_type> d_col(a->mRecCount);
thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, d_col.begin());
auto i_name = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".idx";
pfor_compress(thrust::raw_pointer_cast(d_col.data()), a->mRecCount*int_size, i_name, a->h_columns_int[a->columnNames[z]], 0);
};
};
};
if(a->maxRecs > b->maxRecs)
maxRecs = a->maxRecs;
else
maxRecs = b->maxRecs;
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
b->reWriteHeader(b->load_file_name, b->columnNames[i], a->segCount + b->segCount, a->totalRecs + b->totalRecs, maxRecs);
};
}
else
if(!a->source && !b->source) { //if both source and destination are in memory
size_t oldCount = b->mRecCount;
b->resize(a->mRecCount);
for(unsigned int z = 0; z< b->mColumnCount; z++) {
if(b->type[a->columnNames[z]] == 0) {
thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_int[b->columnNames[z]].begin() + oldCount);
}
else
if(b->type[a->columnNames[z]] == 1) {
thrust::copy(a->h_columns_float[a->columnNames[z]].begin(), a->h_columns_float[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_float[b->columnNames[z]].begin() + oldCount);
}
else {
cudaMemcpy(b->h_columns_char[b->columnNames[z]] + b->char_size[b->columnNames[z]]*oldCount, a->h_columns_char[a->columnNames[z]], a->char_size[a->columnNames[z]]*a->mRecCount, cudaMemcpyHostToHost);
};
};
}
else
if(!a->source && b->source) {
total_segments = b->segCount;
total_count = b->mRecCount;
total_max = b->maxRecs;
queue<string> op_vx;
for(unsigned int i=0; i < a->columnNames.size(); i++)
op_vx.push(a->columnNames[i]);
allocColumns(a, op_vx);
a->resize(a->maxRecs);
for(unsigned int i = 0; i < a->segCount; i++) {
if (a->filtered) {
copyColumns(a, op_vx, i, cnt);
a->CopyToHost(0, a->mRecCount);
};
a->compress(b->load_file_name, 0, 1, i - (a->segCount-1), a->mRecCount, 0);
};
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
b->writeHeader(b->load_file_name, b->columnNames[i], total_segments);
};
};
};
void delete_records(const char* f) {
CudaSet *a;
a = varNames.find(f)->second;
a->name = f;
size_t totalRemoved = 0;
size_t maxRecs = 0;
if(!a->keep) { // temporary variable
process_error(2, "Delete operator is only applicable to disk based sets\nfor deleting records from derived sets please use filter operator ");
}
else { // read matching segments, delete, compress and write on a disk replacing the original segments
string str, str_old;
queue<string> op_vx;
size_t cnt;
for ( auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
op_vx.push((*it).first);
if (std::find(a->columnNames.begin(), a->columnNames.end(), (*it).first) == a->columnNames.end()) {
if ((*it).second.col_type == 0) {
a->type[(*it).first] = 0;
a->decimal[(*it).first] = 0;
//a->h_columns_int[(*it).first] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
a->h_columns_int[(*it).first] = thrust::host_vector<int_type>();
a->d_columns_int[(*it).first] = thrust::device_vector<int_type>();
}
else
if((*it).second.col_type == 1) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 0;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else
if ((*it).second.col_type == 3) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 1;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else {
a->type[(*it).first] = 2;
a->decimal[(*it).first] = 0;
a->h_columns_char[(*it).first] = nullptr;
a->d_columns_char[(*it).first] = nullptr;
a->char_size[(*it).first] = (*it).second.col_length;
};
a->columnNames.push_back((*it).first);
}
};
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->prm_d.resize(a->maxRecs);
size_t cc = a->mRecCount;
size_t tmp;
void* d;
CUDA_SAFE_CALL(cudaMalloc((void **) &d, a->maxRecs*float_size));
unsigned int new_seg_count = 0;
char map_check;
for(unsigned int i = 0; i < a->segCount; i++) {
map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
if(verbose)
cout << "MAP CHECK segment " << i << " " << map_check << endl;
if(map_check != 'N') {
cnt = 0;
copyColumns(a, op_vx, i, cnt);
tmp = a->mRecCount;
if(a->mRecCount) {
bool* res = filter(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
thrust::device_ptr<bool> bp((bool*)res);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, a->prm_d.begin(), thrust::logical_not<bool>());
a->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 0);
cudaFree(res);
// cout << "Remained recs count " << a->mRecCount << endl;
if(a->mRecCount > maxRecs)
maxRecs = a->mRecCount;
if (a->mRecCount) {
totalRemoved = totalRemoved + (tmp - a->mRecCount);
if (a->mRecCount == tmp) { //none deleted
if(new_seg_count != i) {
for (auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
auto colname = (*it).first;
str_old = a->load_file_name + "." + colname + "." + to_string(i);
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
}
else { //some deleted
//cout << "writing segment " << new_seg_count << endl;
map<string, col_data> s = data_dict[a->load_file_name];
for ( map<string, col_data>::iterator it=s.begin() ; it != s.end(); ++it ) {
string colname = (*it).first;
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
if(a->type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str, a->h_columns_int[colname], 0);
}
else
if(a->type[colname] == 1) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(a->decimal[colname]) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+a->mRecCount, d_col_dec, float_to_long());
pfor_compress( d, a->mRecCount*float_size, str, a->h_columns_float[colname], 1);
}
else {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col + a->mRecCount, a->h_columns_float[colname].begin());
fstream binary_file(str.c_str(),ios::out|ios::binary);
binary_file.write((char *)&a->mRecCount, 4);
binary_file.write((char *)(a->h_columns_float[colname].data()),a->mRecCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str + ".hash", a->h_columns_int[colname], 0);
};
};
new_seg_count++;
};
}
else {
totalRemoved = totalRemoved + tmp;
};
}
}
else {
if(new_seg_count != i) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str_old = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str = a->load_file_name + "." + a->columnNames[z] + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
maxRecs = a->maxRecs;
};
};
if (new_seg_count < a->segCount) {
for(unsigned int i = new_seg_count; i < a->segCount; i++) {
//cout << "delete segment " << i << endl;
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str = a->load_file_name + "." + a->columnNames[z];
str += "." + to_string(i);
remove(str.c_str());
};
};
};
for(unsigned int i = 0; i < a->columnNames.size(); i++) { // rewrite every column header with the new segment count
a->reWriteHeader(a->load_file_name, a->columnNames[i], new_seg_count, a->totalRecs-totalRemoved, maxRecs);
};
a->mRecCount = cc;
a->prm_d.resize(0);
a->segCount = new_seg_count;
a->deAllocOnDevice();
cudaFree(d);
};
};
void save_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len;
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
size_t len = data_dict.size();
binary_file.write((char *)&len, 8);
for (auto it=data_dict.begin() ; it != data_dict.end(); ++it ) {
str_len = (*it).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*it).first.data(), str_len);
map<string, col_data> s = (*it).second;
size_t len1 = s.size();
binary_file.write((char *)&len1, 8);
for (auto sit=s.begin() ; sit != s.end(); ++sit ) {
str_len = (*sit).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*sit).first.data(), str_len);
binary_file.write((char *)&(*sit).second.col_type, 4);
binary_file.write((char *)&(*sit).second.col_length, 4);
};
};
binary_file.close();
}
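// For reference, the dictionary file written above has the following layout
// (raw fstream writes, so fields are in host byte order):
//   [8: number of tables]
//   per table:  [8: name length][table name][8: number of columns]
//   per column: [8: name length][column name][4: col_type][4: col_length]
// load_col_data below reads the same fields back in the same order.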
void load_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len, recs, len1;
string str1, str2;
char buffer[4000];
unsigned int col_type, col_length;
fstream binary_file;
binary_file.open(file_name.c_str(),ios::in|ios::binary);
if(binary_file.is_open()) {
binary_file.read((char*)&recs, 8);
for(unsigned int i = 0; i < recs; i++) {
binary_file.read((char*)&str_len, 8);
binary_file.read(buffer, str_len);
str1.assign(buffer, str_len);
binary_file.read((char*)&len1, 8);
for(unsigned int j = 0; j < len1; j++) {
binary_file.read((char*)&str_len, 8);
binary_file.read(buffer, str_len);
str2.assign(buffer, str_len);
binary_file.read((char*)&col_type, 4);
binary_file.read((char*)&col_length, 4);
data_dict[str1][str2].col_type = col_type;
data_dict[str1][str2].col_length = col_length;
//cout << "data DICT " << str1 << " " << str2 << " " << col_type << " " << col_length << endl;
};
};
binary_file.close();
}
else {
cout << "Couldn't open data dictionary" << endl;
};
}
bool var_exists(CudaSet* a, string name) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), name) != a->columnNames.end())
return 1;
else
return 0;
}
int file_exist (const char *filename)
{
std::ifstream infile(filename);
return infile.good();
}
bool check_bitmap_file_exist(CudaSet* left, CudaSet* right)
{
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 0;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
return bitmaps_exist;
}
bool check_bitmaps_exist(CudaSet* left, CudaSet* right)
{
//check if there are join bitmap indexes
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 1;
return 1;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
if(bitmaps_exist) {
while(!right->fil_nums.empty() ) {
left->fil_nums.push(right->fil_nums.front());
right->fil_nums.pop();
};
while(!right->fil_nums_precision.empty() ) {
left->fil_nums_precision.push(right->fil_nums_precision.front());
right->fil_nums_precision.pop();
};
while(!right->fil_nums_f.empty() ) {
left->fil_nums_f.push(right->fil_nums_f.front());
right->fil_nums_f.pop();
};
while(!right->fil_value.empty() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), right->fil_value.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + right->fil_value.front();
left->fil_value.push(fname);
}
else
left->fil_value.push(right->fil_value.front());
right->fil_value.pop();
};
bool add_and = 1;
if(left->fil_type.empty())
add_and = 0;
while(!right->fil_type.empty() ) {
left->fil_type.push(right->fil_type.front());
right->fil_type.pop();
};
if(add_and) {
left->fil_type.push("AND");
};
return 1;
}
else {
return 0;
};
}
void check_sort(const string str, const char* rtable, const char* rid)
{
CudaSet* right = varNames.find(rtable)->second;
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::app);
binary_file.write((char *)&right->sort_check, 1);
binary_file.close();
}
void update_char_permutation(CudaSet* a, string colname, unsigned int* raw_ptr, string ord, void* temp, bool host)
{
auto s = a->string_map[colname];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
a->h_columns_char[colname] = new char[a->mRecCount*len];
memset(a->h_columns_char[colname], 0, a->mRecCount*len);
thrust::device_ptr<unsigned int> perm(raw_ptr);
thrust::device_ptr<int_type> temp_int((int_type*)temp);
thrust::gather(perm, perm+a->mRecCount, a->d_columns_int[colname].begin(), temp_int);
//for(int z = 0 ; z < a->mRecCount; z++) {
//cout << "Init vals " << a->d_columns_int[colname][z] << " " << perm[z] << " " << temp_int[z] << endl;
//};
//cout << "sz " << a->h_columns_int[colname].size() << " " << a->d_columns_int[colname].size() << " " << len << endl;
cudaMemcpy(thrust::raw_pointer_cast(a->h_columns_int[colname].data()), temp, 8*a->mRecCount, cudaMemcpyDeviceToHost);
FILE *f;
f = fopen(a->string_map[colname].c_str(), "rb");
for(int z = 0 ; z < a->mRecCount; z++) {
fseek(f, a->h_columns_int[colname][z] * len, SEEK_SET);
fread(a->h_columns_char[colname] + z*len, 1, len, f);
};
fclose(f);
if(!host) {
void *d;
cudaMalloc((void **) &d, a->mRecCount*len);
a->d_columns_char[colname] = (char*)d;
cudaMemcpy(a->d_columns_char[colname], a->h_columns_char[colname], len*a->mRecCount, cudaMemcpyHostToDevice);
if (ord.compare("DESC") == 0 )
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
cudaFree(d);
}
else {
if (ord.compare("DESC") == 0 )
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
};
}
void compress_int(const string file_name, const thrust::host_vector<int_type>& res)
{
std::vector<unsigned int> dict_val;
unsigned int bits_encoded;
set<int_type> dict_s;
map<int_type, unsigned int> d_ordered;
for (unsigned int i = 0 ; i < res.size(); i++) {
int_type f = res[i];
dict_s.insert(f);
};
unsigned int i = 0;
for (auto it = dict_s.begin(); it != dict_s.end(); it++) {
d_ordered[*it] = i++;
};
for (unsigned int i = 0 ; i < res.size(); i++) {
int_type f = res[i];
dict_val.push_back(d_ordered[f]);
};
bits_encoded = (unsigned int)ceil(log2(double(d_ordered.size()+1)));
//cout << "bits " << bits_encoded << endl;
unsigned int sz = (unsigned int)d_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&sz, 4);
for (auto it = d_ordered.begin(); it != d_ordered.end(); it++) {
binary_file.write((char*)(&(it->first)), int_size);
};
unsigned int fit_count = 64/bits_encoded;
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
unsigned int vals_count = (unsigned int)dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = (unsigned int)dict_val.size();
binary_file.write((char *)&real_count, 4);
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, int_size);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
binary_file.close();
};
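// A fully packed word produced above carries fit_count dictionary ids, the first
// id in the highest-order occupied bits. A minimal decode sketch for one such
// word (hypothetical helper, not part of the original compression code):
static inline void decode_packed_word_sketch(unsigned long long int word, unsigned int fit_count,
                                             unsigned int bits_encoded, unsigned int* out)
{
    unsigned long long int mask = (bits_encoded >= 64) ? ~0ULL : ((1ULL << bits_encoded) - 1ULL);
    for (unsigned int k = 0; k < fit_count; k++) {
        unsigned int shift = (fit_count - 1 - k) * bits_encoded;
        out[k] = (unsigned int)((word >> shift) & mask);
    }
}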
int_type* get_vec(CudaSet* a, string s1_val, stack<int_type*>& exe_vectors) {
int_type* t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end())
t = a->get_int_by_name(s1_val);
else {
t = exe_vectors.top();
exe_vectors.pop();
}
return t;
};
int_type* get_host_vec(CudaSet* a, string s1_val, stack<int_type*>& exe_vectors) {
int_type* t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end()) {
t = a->get_host_int_by_name(s1_val);
}
else {
t = exe_vectors.top();
thrust::device_ptr<int_type> st1((int_type*)t);
for(int z = 0; z < 10; z++)
cout << "RESVEC " << st1[z] << endl;
exe_vectors.pop();
}
return t;
};
unsigned int get_decimals(CudaSet* a, string s1_val, stack<unsigned int>& exe_precision) {
unsigned int t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end())
t = a->decimal_zeroes[s1_val];
else {
t = exe_precision.top();
exe_precision.pop();
}
return t;
};
#ifdef _WIN64
size_t getTotalSystemMemory()
{
MEMORYSTATUSEX status;
status.dwLength = sizeof(status);
GlobalMemoryStatusEx(&status);
return status.ullTotalPhys;
}
#else
size_t getTotalSystemMemory()
{
long pages = sysconf(_SC_PHYS_PAGES);
long page_size = sysconf(_SC_PAGE_SIZE);
return pages * page_size;
}
#endif
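// Typical use of the helper above is a coarse capacity check, e.g. refusing to
// materialise a host-side result larger than a fraction of physical RAM. A hedged
// sketch (the 1/2 ratio is an illustrative choice, not taken from the original code):
static inline bool fits_in_host_memory_sketch(size_t bytes)
{
    return bytes < getTotalSystemMemory() / 2;
}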
|
6f49476788341a0dbee0ad8390ba647d0017f322.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
__global__ void vecAdd(double *a, double *b, double *c, int n){
// Global Thread ID
int id = blockIdx.x*blockDim.x + threadIdx.x;
// Check to make sure we are in range
if (id < n){
c[id] = a[id] + b[id];
}
}
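// The launch in main() rounds the grid up so every element is covered and relies
// on the in-kernel bounds check for the overhang. A hedged helper for the same
// rounding (not part of the original example, which computes it inline with ceil):
static inline unsigned int blocks_for(unsigned int n, unsigned int threads_per_block){
    return (n + threads_per_block - 1) / threads_per_block;
}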
int main(){
int n = 1024;
// Initializing host vectors
double *a, *b, *c;
a = (double*)malloc(sizeof(double)*n);
b = (double*)malloc(sizeof(double)*n);
c = (double*)malloc(sizeof(double)*n);
// Initializing all device vectors
double *d_a, *d_b, *d_c;
hipMalloc(&d_a, sizeof(double)*n);
hipMalloc(&d_b, sizeof(double)*n);
hipMalloc(&d_c, sizeof(double)*n);
// Initializing a and b
for (size_t i = 0; i < n; ++i){
a[i] = 1;
b[i] = 1;
c[i] = 0;
}
hipMemcpy(d_a, a, sizeof(double)*n, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(double)*n, hipMemcpyHostToDevice);
dim3 threads, grid;
threads = {100, 1, 1};
grid = {(unsigned int)ceil((float)n/threads.x), 1, 1};
hipLaunchKernelGGL(( vecAdd), dim3(grid), dim3(threads), 0, 0, d_a, d_b, d_c, n);
/*
// Vector Addition
for (size_t i = 0; i < n; ++i){
c[i] = a[i] + b[i];
}
*/
// Copying back to host
hipMemcpy(c, d_c, sizeof(double)*n, hipMemcpyDeviceToHost);
// Check to make sure everything works
for (size_t i = 0; i < n; ++i){
if (c[i] != a[i] + b[i]){
std::cout << "Yo. You failed. What a loser! Ha\n";
exit(1);
}
}
std::cout << "You passed the test, congratulations!\n";
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
| 6f49476788341a0dbee0ad8390ba647d0017f322.cu | #include <iostream>
#include <math.h>
__global__ void vecAdd(double *a, double *b, double *c, int n){
// Global Thread ID
int id = blockIdx.x*blockDim.x + threadIdx.x;
// Check to make sure we are in range
if (id < n){
c[id] = a[id] + b[id];
}
}
int main(){
int n = 1024;
// Initializing host vectors
double *a, *b, *c;
a = (double*)malloc(sizeof(double)*n);
b = (double*)malloc(sizeof(double)*n);
c = (double*)malloc(sizeof(double)*n);
// Initializing all device vectors
double *d_a, *d_b, *d_c;
cudaMalloc(&d_a, sizeof(double)*n);
cudaMalloc(&d_b, sizeof(double)*n);
cudaMalloc(&d_c, sizeof(double)*n);
// Initializing a and b
for (size_t i = 0; i < n; ++i){
a[i] = 1;
b[i] = 1;
c[i] = 0;
}
cudaMemcpy(d_a, a, sizeof(double)*n, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(double)*n, cudaMemcpyHostToDevice);
dim3 threads, grid;
threads = {100, 1, 1};
grid = {(unsigned int)ceil((float)n/threads.x), 1, 1};
vecAdd<<<grid, threads>>>(d_a, d_b, d_c, n);
/*
// Vector Addition
for (size_t i = 0; i < n; ++i){
c[i] = a[i] + b[i];
}
*/
// Copying back to host
cudaMemcpy(c, d_c, sizeof(double)*n, cudaMemcpyDeviceToHost);
// Check to make sure everything works
for (size_t i = 0; i < n; ++i){
if (c[i] != a[i] + b[i]){
std::cout << "Yo. You failed. What a loser! Ha\n";
exit(1);
}
}
std::cout << "You passed the test, congratulations!\n";
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
|
06edb1bd1fe15106589dfd143a3a4a5080b99bf9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// The code is based on
// https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/csrc/box_iou_rotated/
#include "../rbox_iou/rbox_iou_utils.h"
#include "paddle/extension.h"
template <typename T>
__global__ void
matched_rbox_iou_cuda_kernel(const int rbox_num, const T *rbox1_data_ptr,
const T *rbox2_data_ptr, T *output_data_ptr) {
for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < rbox_num;
tid += blockDim.x * gridDim.x) {
output_data_ptr[tid] =
rbox_iou_single<T>(rbox1_data_ptr + tid * 5, rbox2_data_ptr + tid * 5);
}
}
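// The kernel above uses a grid-stride loop, so it stays correct even when the
// grid is launched with fewer threads than rbox_num. Sequential equivalent, for
// reading purposes only:
//   for (int tid = 0; tid < rbox_num; ++tid)
//       output_data_ptr[tid] = rbox_iou_single<T>(rbox1_data_ptr + tid * 5, rbox2_data_ptr + tid * 5);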
#define CHECK_INPUT_GPU(x) \
PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
std::vector<paddle::Tensor>
MatchedRboxIouCUDAForward(const paddle::Tensor &rbox1,
const paddle::Tensor &rbox2) {
CHECK_INPUT_GPU(rbox1);
CHECK_INPUT_GPU(rbox2);
PD_CHECK(rbox1.shape()[0] == rbox2.shape()[0], "inputs must be same dim");
auto rbox_num = rbox1.shape()[0];
auto output = paddle::empty({rbox_num}, rbox1.dtype(), paddle::GPUPlace());
const int thread_per_block = 512;
const int block_per_grid = CeilDiv(rbox_num, thread_per_block);
PD_DISPATCH_FLOATING_TYPES(
rbox1.type(), "matched_rbox_iou_cuda_kernel", ([&] {
hipLaunchKernelGGL(( matched_rbox_iou_cuda_kernel<
data_t>), dim3(block_per_grid), dim3(thread_per_block), 0, rbox1.stream(),
rbox_num, rbox1.data<data_t>(), rbox2.data<data_t>(),
output.data<data_t>());
}));
return {output};
}
| 06edb1bd1fe15106589dfd143a3a4a5080b99bf9.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// The code is based on
// https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/csrc/box_iou_rotated/
#include "../rbox_iou/rbox_iou_utils.h"
#include "paddle/extension.h"
template <typename T>
__global__ void
matched_rbox_iou_cuda_kernel(const int rbox_num, const T *rbox1_data_ptr,
const T *rbox2_data_ptr, T *output_data_ptr) {
for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < rbox_num;
tid += blockDim.x * gridDim.x) {
output_data_ptr[tid] =
rbox_iou_single<T>(rbox1_data_ptr + tid * 5, rbox2_data_ptr + tid * 5);
}
}
#define CHECK_INPUT_GPU(x) \
PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
std::vector<paddle::Tensor>
MatchedRboxIouCUDAForward(const paddle::Tensor &rbox1,
const paddle::Tensor &rbox2) {
CHECK_INPUT_GPU(rbox1);
CHECK_INPUT_GPU(rbox2);
PD_CHECK(rbox1.shape()[0] == rbox2.shape()[0], "inputs must be same dim");
auto rbox_num = rbox1.shape()[0];
auto output = paddle::empty({rbox_num}, rbox1.dtype(), paddle::GPUPlace());
const int thread_per_block = 512;
const int block_per_grid = CeilDiv(rbox_num, thread_per_block);
PD_DISPATCH_FLOATING_TYPES(
rbox1.type(), "matched_rbox_iou_cuda_kernel", ([&] {
matched_rbox_iou_cuda_kernel<
data_t><<<block_per_grid, thread_per_block, 0, rbox1.stream()>>>(
rbox_num, rbox1.data<data_t>(), rbox2.data<data_t>(),
output.data<data_t>());
}));
return {output};
}
|
a00dd7169a90316d4be45ac2ac3041c01750f009.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %run_test hipify "%s" "%t" %cuda_args
/*
Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include<cuda.h>
#include<cuda_runtime.h>
#include<iostream>
#include<stdio.h>
#include<malloc.h>
#define LEN 1024
#define SIZE LEN * sizeof(float)
#define ITER 1024*1024
// CHECK: if(status != hipSuccess) {
#define check(msg, status){ \
if(status != hipSuccess) { \
printf("%s failed. \n", #msg); \
} \
}
__global__ void Inc1(float *Ad, float *Bd){
int tx = threadIdx.x + blockIdx.x * blockDim.x;
if(tx < 1 ){
for(int i=0;i<ITER;i++){
Ad[tx] = Ad[tx] + 1.0f;
for(int j=0;j<256;j++){
Bd[tx] = Ad[tx];
}
}
}
}
__global__ void Inc2(float *Ad, float *Bd){
int tx = threadIdx.x + blockIdx.x * blockDim.x;
if(tx < 1024){
for(int i=0;i<ITER;i++){
Ad[tx] = Ad[tx] + 1.0f;
for(int j=0;j<256;j++){
Bd[tx] = Ad[tx];
}
}
}
}
int main(){
float *A, *Ad, *Bd;
A = new float[LEN];
for(int i=0;i<LEN;i++){
A[i] = 0.0f;
}
// CHECK: hipError_t status;
hipError_t status;
// CHECK: status = hipHostRegister(A, SIZE, hipHostRegisterMapped);
status = hipHostRegister(A, SIZE, hipHostRegisterMapped);
check("Registering A",status);
// CHECK: hipHostGetDevicePointer(&Ad, A, 0);
hipHostGetDevicePointer(&Ad, A, 0);
// CHECK: hipMalloc((void**) &Bd, SIZE);
hipMalloc((void**) &Bd, SIZE);
dim3 dimGrid(LEN/512,1,1);
dim3 dimBlock(512,1,1);
// CHECK: hipLaunchKernelGGL(Inc1, dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd);
hipLaunchKernelGGL(( Inc1), dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd);
A[0] = -(ITER*1.0f);
std::cout<<"Same cache line before completion: \t"<< A[0]<<std::endl;
// CHECK: hipDeviceSynchronize();
hipDeviceSynchronize();
std::cout<<"Same cache line after completion: \t"<< A[0]<<std::endl;
for(int i=0;i<LEN;i++){
A[i] = 0.0f;
}
// CHECK: hipLaunchKernelGGL(Inc2, dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd);
hipLaunchKernelGGL(( Inc2), dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd);
A[0] = -(ITER*1.0f);
std::cout<<"Diff cache line before completion: \t"<<A[0]<<std::endl;
// CHECK: hipDeviceSynchronize();
hipDeviceSynchronize();
std::cout<<"Diff cache line after completion: \t"<<A[0]<<std::endl;
}
| a00dd7169a90316d4be45ac2ac3041c01750f009.cu | // RUN: %run_test hipify "%s" "%t" %cuda_args
/*
Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include<cuda.h>
#include<cuda_runtime.h>
#include<iostream>
#include<stdio.h>
#include<malloc.h>
#define LEN 1024
#define SIZE LEN * sizeof(float)
#define ITER 1024*1024
// CHECK: if(status != hipSuccess) {
#define check(msg, status){ \
if(status != cudaSuccess) { \
printf("%s failed. \n", #msg); \
} \
}
__global__ void Inc1(float *Ad, float *Bd){
int tx = threadIdx.x + blockIdx.x * blockDim.x;
if(tx < 1 ){
for(int i=0;i<ITER;i++){
Ad[tx] = Ad[tx] + 1.0f;
for(int j=0;j<256;j++){
Bd[tx] = Ad[tx];
}
}
}
}
__global__ void Inc2(float *Ad, float *Bd){
int tx = threadIdx.x + blockIdx.x * blockDim.x;
if(tx < 1024){
for(int i=0;i<ITER;i++){
Ad[tx] = Ad[tx] + 1.0f;
for(int j=0;j<256;j++){
Bd[tx] = Ad[tx];
}
}
}
}
int main(){
float *A, *Ad, *Bd;
A = new float[LEN];
for(int i=0;i<LEN;i++){
A[i] = 0.0f;
}
// CHECK: hipError_t status;
cudaError_t status;
// CHECK: status = hipHostRegister(A, SIZE, hipHostRegisterMapped);
status = cudaHostRegister(A, SIZE, cudaHostRegisterMapped);
check("Registering A",status);
// CHECK: hipHostGetDevicePointer(&Ad, A, 0);
cudaHostGetDevicePointer(&Ad, A, 0);
// CHECK: hipMalloc((void**) &Bd, SIZE);
cudaMalloc((void**) &Bd, SIZE);
dim3 dimGrid(LEN/512,1,1);
dim3 dimBlock(512,1,1);
// CHECK: hipLaunchKernelGGL(Inc1, dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd);
Inc1<<<dimGrid, dimBlock>>>(Ad, Bd);
A[0] = -(ITER*1.0f);
std::cout<<"Same cache line before completion: \t"<< A[0]<<std::endl;
// CHECK: hipDeviceSynchronize();
cudaDeviceSynchronize();
std::cout<<"Same cache line after completion: \t"<< A[0]<<std::endl;
for(int i=0;i<LEN;i++){
A[i] = 0.0f;
}
// CHECK: hipLaunchKernelGGL(Inc2, dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd);
Inc2<<<dimGrid, dimBlock>>>(Ad, Bd);
A[0] = -(ITER*1.0f);
std::cout<<"Diff cache line before completion: \t"<<A[0]<<std::endl;
// CHECK: hipDeviceSynchronize();
cudaDeviceSynchronize();
std::cout<<"Diff cache line after completion: \t"<<A[0]<<std::endl;
}
|
c433643545159c15b340c05c07e558d544277c78.hip | // !!! This is a file automatically generated by hipify!!!
#include <unittest/unittest.h>
#include <thrust/sequence.h>
#include <thrust/device_malloc_allocator.h>
#include <vector>
#include <list>
#include <limits>
#include <utility>
template <class Vector>
void TestVectorZeroSize(void)
{
Vector v;
ASSERT_EQUAL(v.size(), 0lu);
ASSERT_EQUAL((v.begin() == v.end()), true);
}
DECLARE_VECTOR_UNITTEST(TestVectorZeroSize);
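// DECLARE_VECTOR_UNITTEST registers the templated test above with the harness,
// which (in this test framework) instantiates it for the host_vector and
// device_vector flavours it knows about; the plain DECLARE_UNITTEST used further
// below registers a single, non-templated test instead.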
void TestVectorBool(void)
{
thrust::host_vector<bool> h(3);
thrust::device_vector<bool> d(3);
h[0] = true; h[1] = false; h[2] = true;
d[0] = true; d[1] = false; d[2] = true;
ASSERT_EQUAL(h[0], true);
ASSERT_EQUAL(h[1], false);
ASSERT_EQUAL(h[2], true);
ASSERT_EQUAL(d[0], true);
ASSERT_EQUAL(d[1], false);
ASSERT_EQUAL(d[2], true);
}
DECLARE_UNITTEST(TestVectorBool);
template <class Vector>
void TestVectorFrontBack(void)
{
typedef typename Vector::value_type T;
Vector v(3);
v[0] = 0; v[1] = 1; v[2] = 2;
ASSERT_EQUAL(v.front(), T(0));
ASSERT_EQUAL(v.back(), T(2));
}
DECLARE_VECTOR_UNITTEST(TestVectorFrontBack);
template <class Vector>
void TestVectorData(void)
{
typedef typename Vector::pointer PointerT;
typedef typename Vector::const_pointer PointerConstT;
Vector v(3);
v[0] = 0; v[1] = 1; v[2] = 2;
ASSERT_EQUAL(0, *v.data());
ASSERT_EQUAL(1, *(v.data() + 1));
ASSERT_EQUAL(2, *(v.data() + 2));
ASSERT_EQUAL(PointerT(&v.front()), v.data());
ASSERT_EQUAL(PointerT(&*v.begin()), v.data());
ASSERT_EQUAL(PointerT(&v[0]), v.data());
const Vector &c_v = v;
ASSERT_EQUAL(0, *c_v.data());
ASSERT_EQUAL(1, *(c_v.data() + 1));
ASSERT_EQUAL(2, *(c_v.data() + 2));
ASSERT_EQUAL(PointerConstT(&c_v.front()), c_v.data());
ASSERT_EQUAL(PointerConstT(&*c_v.begin()), c_v.data());
ASSERT_EQUAL(PointerConstT(&c_v[0]), c_v.data());
}
DECLARE_VECTOR_UNITTEST(TestVectorData);
template <class Vector>
void TestVectorElementAssignment(void)
{
Vector v(3);
v[0] = 0; v[1] = 1; v[2] = 2;
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
v[0] = 10; v[1] = 11; v[2] = 12;
ASSERT_EQUAL(v[0], 10);
ASSERT_EQUAL(v[1], 11);
ASSERT_EQUAL(v[2], 12);
Vector w(3);
w[0] = v[0];
w[1] = v[1];
w[2] = v[2];
ASSERT_EQUAL(v, w);
}
DECLARE_VECTOR_UNITTEST(TestVectorElementAssignment);
template <class Vector>
void TestVectorFromSTLVector(void)
{
typedef typename Vector::value_type T;
std::vector<T> stl_vector(3);
stl_vector[0] = 0;
stl_vector[1] = 1;
stl_vector[2] = 2;
thrust::host_vector<T> v(stl_vector);
ASSERT_EQUAL(v.size(), 3lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
v = stl_vector;
ASSERT_EQUAL(v.size(), 3lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
}
DECLARE_VECTOR_UNITTEST(TestVectorFromSTLVector);
template <class Vector>
void TestVectorFillAssign(void)
{
typedef typename Vector::value_type T;
thrust::host_vector<T> v;
v.assign(3, 13);
ASSERT_EQUAL(v.size(), 3lu);
ASSERT_EQUAL(v[0], 13);
ASSERT_EQUAL(v[1], 13);
ASSERT_EQUAL(v[2], 13);
}
DECLARE_VECTOR_UNITTEST(TestVectorFillAssign);
template <class Vector>
void TestVectorAssignFromSTLVector(void)
{
typedef typename Vector::value_type T;
std::vector<T> stl_vector(3);
stl_vector[0] = 0;
stl_vector[1] = 1;
stl_vector[2] = 2;
thrust::host_vector<T> v;
v.assign(stl_vector.begin(), stl_vector.end());
ASSERT_EQUAL(v.size(), 3lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
}
DECLARE_VECTOR_UNITTEST(TestVectorAssignFromSTLVector);
template <class Vector>
void TestVectorFromBiDirectionalIterator(void)
{
typedef typename Vector::value_type T;
std::list<T> stl_list;
stl_list.push_back(0);
stl_list.push_back(1);
stl_list.push_back(2);
Vector v(stl_list.begin(), stl_list.end());
ASSERT_EQUAL(v.size(), 3lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
}
DECLARE_VECTOR_UNITTEST(TestVectorFromBiDirectionalIterator);
template <class Vector>
void TestVectorAssignFromBiDirectionalIterator(void)
{
typedef typename Vector::value_type T;
std::list<T> stl_list;
stl_list.push_back(0);
stl_list.push_back(1);
stl_list.push_back(2);
Vector v;
v.assign(stl_list.begin(), stl_list.end());
ASSERT_EQUAL(v.size(), 3lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
}
DECLARE_VECTOR_UNITTEST(TestVectorAssignFromBiDirectionalIterator);
template <class Vector>
void TestVectorAssignFromHostVector(void)
{
typedef typename Vector::value_type T;
thrust::host_vector<T> h(3);
h[0] = 0;
h[1] = 1;
h[2] = 2;
Vector v;
v.assign(h.begin(), h.end());
ASSERT_EQUAL(v, h);
}
DECLARE_VECTOR_UNITTEST(TestVectorAssignFromHostVector);
template <class Vector>
void TestVectorToAndFromHostVector(void)
{
typedef typename Vector::value_type T;
thrust::host_vector<T> h(3);
h[0] = 0;
h[1] = 1;
h[2] = 2;
Vector v(h);
ASSERT_EQUAL(v, h);
THRUST_DISABLE_CLANG_SELF_ASSIGNMENT_WARNING(v = v);
ASSERT_EQUAL(v, h);
v[0] = 10;
v[1] = 11;
v[2] = 12;
ASSERT_EQUAL(h[0], 0); ASSERT_EQUAL(v[0], 10);
ASSERT_EQUAL(h[1], 1); ASSERT_EQUAL(v[1], 11);
ASSERT_EQUAL(h[2], 2); ASSERT_EQUAL(v[2], 12);
h = v;
ASSERT_EQUAL(v, h);
h[1] = 11;
v = h;
ASSERT_EQUAL(v, h);
}
DECLARE_VECTOR_UNITTEST(TestVectorToAndFromHostVector);
template <class Vector>
void TestVectorAssignFromDeviceVector(void)
{
typedef typename Vector::value_type T;
thrust::device_vector<T> d(3);
d[0] = 0;
d[1] = 1;
d[2] = 2;
Vector v;
v.assign(d.begin(), d.end());
ASSERT_EQUAL(v, d);
}
DECLARE_VECTOR_UNITTEST(TestVectorAssignFromDeviceVector);
template <class Vector>
void TestVectorToAndFromDeviceVector(void)
{
typedef typename Vector::value_type T;
thrust::device_vector<T> h(3);
h[0] = 0;
h[1] = 1;
h[2] = 2;
Vector v(h);
ASSERT_EQUAL(v, h);
THRUST_DISABLE_CLANG_SELF_ASSIGNMENT_WARNING(v = v);
ASSERT_EQUAL(v, h);
v[0] = 10;
v[1] = 11;
v[2] = 12;
ASSERT_EQUAL(h[0], 0); ASSERT_EQUAL(v[0], 10);
ASSERT_EQUAL(h[1], 1); ASSERT_EQUAL(v[1], 11);
ASSERT_EQUAL(h[2], 2); ASSERT_EQUAL(v[2], 12);
h = v;
ASSERT_EQUAL(v, h);
h[1] = 11;
v = h;
ASSERT_EQUAL(v, h);
}
DECLARE_VECTOR_UNITTEST(TestVectorToAndFromDeviceVector);
template <class Vector>
void TestVectorWithInitialValue(void)
{
typedef typename Vector::value_type T;
const T init = 17;
Vector v(3, init);
ASSERT_EQUAL(v.size(), 3lu);
ASSERT_EQUAL(v[0], init);
ASSERT_EQUAL(v[1], init);
ASSERT_EQUAL(v[2], init);
}
DECLARE_VECTOR_UNITTEST(TestVectorWithInitialValue);
template <class Vector>
void TestVectorSwap(void)
{
Vector v(3);
v[0] = 0; v[1] = 1; v[2] = 2;
Vector u(3);
u[0] = 10; u[1] = 11; u[2] = 12;
v.swap(u);
ASSERT_EQUAL(v[0], 10); ASSERT_EQUAL(u[0], 0);
ASSERT_EQUAL(v[1], 11); ASSERT_EQUAL(u[1], 1);
ASSERT_EQUAL(v[2], 12); ASSERT_EQUAL(u[2], 2);
}
DECLARE_VECTOR_UNITTEST(TestVectorSwap);
template <class Vector>
void TestVectorErasePosition(void)
{
Vector v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
v.erase(v.begin() + 2);
ASSERT_EQUAL(v.size(), 4lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 3);
ASSERT_EQUAL(v[3], 4);
v.erase(v.begin() + 0);
ASSERT_EQUAL(v.size(), 3lu);
ASSERT_EQUAL(v[0], 1);
ASSERT_EQUAL(v[1], 3);
ASSERT_EQUAL(v[2], 4);
v.erase(v.begin() + 2);
ASSERT_EQUAL(v.size(), 2lu);
ASSERT_EQUAL(v[0], 1);
ASSERT_EQUAL(v[1], 3);
v.erase(v.begin() + 1);
ASSERT_EQUAL(v.size(), 1lu);
ASSERT_EQUAL(v[0], 1);
v.erase(v.begin() + 0);
ASSERT_EQUAL(v.size(), 0lu);
}
DECLARE_VECTOR_UNITTEST(TestVectorErasePosition);
template <class Vector>
void TestVectorEraseRange(void)
{
Vector v(6);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4; v[5] = 5;
v.erase(v.begin() + 1, v.begin() + 3);
ASSERT_EQUAL(v.size(), 4lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 3);
ASSERT_EQUAL(v[2], 4);
ASSERT_EQUAL(v[3], 5);
v.erase(v.begin() + 2, v.end());
ASSERT_EQUAL(v.size(), 2lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 3);
v.erase(v.begin() + 0, v.begin() + 1);
ASSERT_EQUAL(v.size(), 1lu);
ASSERT_EQUAL(v[0], 3);
v.erase(v.begin(), v.end());
ASSERT_EQUAL(v.size(), 0lu);
}
DECLARE_VECTOR_UNITTEST(TestVectorEraseRange);
void TestVectorEquality(void)
{
thrust::host_vector<int> h_a(3);
thrust::host_vector<int> h_b(3);
thrust::host_vector<int> h_c(3);
h_a[0] = 0; h_a[1] = 1; h_a[2] = 2;
h_b[0] = 0; h_b[1] = 1; h_b[2] = 3;
h_b[0] = 0; h_b[1] = 1;
thrust::device_vector<int> d_a(3);
thrust::device_vector<int> d_b(3);
thrust::device_vector<int> d_c(3);
d_a[0] = 0; d_a[1] = 1; d_a[2] = 2;
d_b[0] = 0; d_b[1] = 1; d_b[2] = 3;
d_b[0] = 0; d_b[1] = 1;
std::vector<int> s_a(3);
std::vector<int> s_b(3);
std::vector<int> s_c(3);
s_a[0] = 0; s_a[1] = 1; s_a[2] = 2;
s_b[0] = 0; s_b[1] = 1; s_b[2] = 3;
s_b[0] = 0; s_b[1] = 1;
ASSERT_EQUAL((h_a == h_a), true); ASSERT_EQUAL((h_a == d_a), true); ASSERT_EQUAL((d_a == h_a), true); ASSERT_EQUAL((d_a == d_a), true);
ASSERT_EQUAL((h_b == h_b), true); ASSERT_EQUAL((h_b == d_b), true); ASSERT_EQUAL((d_b == h_b), true); ASSERT_EQUAL((d_b == d_b), true);
ASSERT_EQUAL((h_c == h_c), true); ASSERT_EQUAL((h_c == d_c), true); ASSERT_EQUAL((d_c == h_c), true); ASSERT_EQUAL((d_c == d_c), true);
// test vector vs device_vector
ASSERT_EQUAL((s_a == d_a), true); ASSERT_EQUAL((d_a == s_a), true);
ASSERT_EQUAL((s_b == d_b), true); ASSERT_EQUAL((d_b == s_b), true);
ASSERT_EQUAL((s_c == d_c), true); ASSERT_EQUAL((d_c == s_c), true);
// test vector vs host_vector
ASSERT_EQUAL((s_a == h_a), true); ASSERT_EQUAL((h_a == s_a), true);
ASSERT_EQUAL((s_b == h_b), true); ASSERT_EQUAL((h_b == s_b), true);
ASSERT_EQUAL((s_c == h_c), true); ASSERT_EQUAL((h_c == s_c), true);
ASSERT_EQUAL((h_a == h_b), false); ASSERT_EQUAL((h_a == d_b), false); ASSERT_EQUAL((d_a == h_b), false); ASSERT_EQUAL((d_a == d_b), false);
ASSERT_EQUAL((h_b == h_a), false); ASSERT_EQUAL((h_b == d_a), false); ASSERT_EQUAL((d_b == h_a), false); ASSERT_EQUAL((d_b == d_a), false);
ASSERT_EQUAL((h_a == h_c), false); ASSERT_EQUAL((h_a == d_c), false); ASSERT_EQUAL((d_a == h_c), false); ASSERT_EQUAL((d_a == d_c), false);
ASSERT_EQUAL((h_c == h_a), false); ASSERT_EQUAL((h_c == d_a), false); ASSERT_EQUAL((d_c == h_a), false); ASSERT_EQUAL((d_c == d_a), false);
ASSERT_EQUAL((h_b == h_c), false); ASSERT_EQUAL((h_b == d_c), false); ASSERT_EQUAL((d_b == h_c), false); ASSERT_EQUAL((d_b == d_c), false);
ASSERT_EQUAL((h_c == h_b), false); ASSERT_EQUAL((h_c == d_b), false); ASSERT_EQUAL((d_c == h_b), false); ASSERT_EQUAL((d_c == d_b), false);
// test vector vs device_vector
ASSERT_EQUAL((s_a == d_b), false); ASSERT_EQUAL((d_a == s_b), false);
ASSERT_EQUAL((s_b == d_a), false); ASSERT_EQUAL((d_b == s_a), false);
ASSERT_EQUAL((s_a == d_c), false); ASSERT_EQUAL((d_a == s_c), false);
ASSERT_EQUAL((s_c == d_a), false); ASSERT_EQUAL((d_c == s_a), false);
ASSERT_EQUAL((s_b == d_c), false); ASSERT_EQUAL((d_b == s_c), false);
ASSERT_EQUAL((s_c == d_b), false); ASSERT_EQUAL((d_c == s_b), false);
// test vector vs host_vector
ASSERT_EQUAL((s_a == h_b), false); ASSERT_EQUAL((h_a == s_b), false);
ASSERT_EQUAL((s_b == h_a), false); ASSERT_EQUAL((h_b == s_a), false);
ASSERT_EQUAL((s_a == h_c), false); ASSERT_EQUAL((h_a == s_c), false);
ASSERT_EQUAL((s_c == h_a), false); ASSERT_EQUAL((h_c == s_a), false);
ASSERT_EQUAL((s_b == h_c), false); ASSERT_EQUAL((h_b == s_c), false);
ASSERT_EQUAL((s_c == h_b), false); ASSERT_EQUAL((h_c == s_b), false);
}
DECLARE_UNITTEST(TestVectorEquality);
void TestVectorInequality(void)
{
thrust::host_vector<int> h_a(3);
thrust::host_vector<int> h_b(3);
thrust::host_vector<int> h_c(3);
h_a[0] = 0; h_a[1] = 1; h_a[2] = 2;
h_b[0] = 0; h_b[1] = 1; h_b[2] = 3;
h_b[0] = 0; h_b[1] = 1;
thrust::device_vector<int> d_a(3);
thrust::device_vector<int> d_b(3);
thrust::device_vector<int> d_c(3);
d_a[0] = 0; d_a[1] = 1; d_a[2] = 2;
d_b[0] = 0; d_b[1] = 1; d_b[2] = 3;
d_b[0] = 0; d_b[1] = 1;
std::vector<int> s_a(3);
std::vector<int> s_b(3);
std::vector<int> s_c(3);
s_a[0] = 0; s_a[1] = 1; s_a[2] = 2;
s_b[0] = 0; s_b[1] = 1; s_b[2] = 3;
s_b[0] = 0; s_b[1] = 1;
ASSERT_EQUAL((h_a != h_a), false); ASSERT_EQUAL((h_a != d_a), false); ASSERT_EQUAL((d_a != h_a), false); ASSERT_EQUAL((d_a != d_a), false);
ASSERT_EQUAL((h_b != h_b), false); ASSERT_EQUAL((h_b != d_b), false); ASSERT_EQUAL((d_b != h_b), false); ASSERT_EQUAL((d_b != d_b), false);
ASSERT_EQUAL((h_c != h_c), false); ASSERT_EQUAL((h_c != d_c), false); ASSERT_EQUAL((d_c != h_c), false); ASSERT_EQUAL((d_c != d_c), false);
// test vector vs device_vector
ASSERT_EQUAL((s_a != d_a), false); ASSERT_EQUAL((d_a != s_a), false);
ASSERT_EQUAL((s_b != d_b), false); ASSERT_EQUAL((d_b != s_b), false);
ASSERT_EQUAL((s_c != d_c), false); ASSERT_EQUAL((d_c != s_c), false);
// test vector vs host_vector
ASSERT_EQUAL((s_a != h_a), false); ASSERT_EQUAL((h_a != s_a), false);
ASSERT_EQUAL((s_b != h_b), false); ASSERT_EQUAL((h_b != s_b), false);
ASSERT_EQUAL((s_c != h_c), false); ASSERT_EQUAL((h_c != s_c), false);
ASSERT_EQUAL((h_a != h_b), true); ASSERT_EQUAL((h_a != d_b), true); ASSERT_EQUAL((d_a != h_b), true); ASSERT_EQUAL((d_a != d_b), true);
ASSERT_EQUAL((h_b != h_a), true); ASSERT_EQUAL((h_b != d_a), true); ASSERT_EQUAL((d_b != h_a), true); ASSERT_EQUAL((d_b != d_a), true);
ASSERT_EQUAL((h_a != h_c), true); ASSERT_EQUAL((h_a != d_c), true); ASSERT_EQUAL((d_a != h_c), true); ASSERT_EQUAL((d_a != d_c), true);
ASSERT_EQUAL((h_c != h_a), true); ASSERT_EQUAL((h_c != d_a), true); ASSERT_EQUAL((d_c != h_a), true); ASSERT_EQUAL((d_c != d_a), true);
ASSERT_EQUAL((h_b != h_c), true); ASSERT_EQUAL((h_b != d_c), true); ASSERT_EQUAL((d_b != h_c), true); ASSERT_EQUAL((d_b != d_c), true);
ASSERT_EQUAL((h_c != h_b), true); ASSERT_EQUAL((h_c != d_b), true); ASSERT_EQUAL((d_c != h_b), true); ASSERT_EQUAL((d_c != d_b), true);
// test vector vs device_vector
ASSERT_EQUAL((s_a != d_b), true); ASSERT_EQUAL((d_a != s_b), true);
ASSERT_EQUAL((s_b != d_a), true); ASSERT_EQUAL((d_b != s_a), true);
ASSERT_EQUAL((s_a != d_c), true); ASSERT_EQUAL((d_a != s_c), true);
ASSERT_EQUAL((s_c != d_a), true); ASSERT_EQUAL((d_c != s_a), true);
ASSERT_EQUAL((s_b != d_c), true); ASSERT_EQUAL((d_b != s_c), true);
ASSERT_EQUAL((s_c != d_b), true); ASSERT_EQUAL((d_c != s_b), true);
// test vector vs host_vector
ASSERT_EQUAL((s_a != h_b), true); ASSERT_EQUAL((h_a != s_b), true);
ASSERT_EQUAL((s_b != h_a), true); ASSERT_EQUAL((h_b != s_a), true);
ASSERT_EQUAL((s_a != h_c), true); ASSERT_EQUAL((h_a != s_c), true);
ASSERT_EQUAL((s_c != h_a), true); ASSERT_EQUAL((h_c != s_a), true);
ASSERT_EQUAL((s_b != h_c), true); ASSERT_EQUAL((h_b != s_c), true);
ASSERT_EQUAL((s_c != h_b), true); ASSERT_EQUAL((h_c != s_b), true);
}
DECLARE_UNITTEST(TestVectorInequality);
template <class Vector>
void TestVectorResizing(void)
{
Vector v;
v.resize(3);
ASSERT_EQUAL(v.size(), 3lu);
v[0] = 0; v[1] = 1; v[2] = 2;
v.resize(5);
ASSERT_EQUAL(v.size(), 5lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
v[3] = 3; v[4] = 4;
v.resize(4);
ASSERT_EQUAL(v.size(), 4lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
ASSERT_EQUAL(v[3], 3);
v.resize(0);
ASSERT_EQUAL(v.size(), 0lu);
// TODO remove this WAR
#if defined(__HIPCC__) && CUDART_VERSION==3000
// depending on sizeof(T), we will receive one
// of two possible exceptions
try
{
v.resize(std::numeric_limits<size_t>::max());
}
catch(std::length_error e) {}
catch(std::bad_alloc e)
{
// reset the CUDA error
hipGetLastError();
} // end catch
#endif // defined(__HIPCC__) && CUDART_VERSION==3000
ASSERT_EQUAL(v.size(), 0lu);
}
DECLARE_VECTOR_UNITTEST(TestVectorResizing);
template <class Vector>
void TestVectorReserving(void)
{
Vector v;
v.reserve(3);
ASSERT_GEQUAL(v.capacity(), 3lu);
size_t old_capacity = v.capacity();
v.reserve(0);
ASSERT_EQUAL(v.capacity(), old_capacity);
// TODO remove this WAR
#if defined(__HIPCC__) && CUDART_VERSION==3000
try
{
v.reserve(std::numeric_limits<size_t>::max());
}
catch(std::length_error e) {}
catch(std::bad_alloc e) {}
#endif // defined(__HIPCC__) && CUDART_VERSION==3000
ASSERT_EQUAL(v.capacity(), old_capacity);
}
DECLARE_VECTOR_UNITTEST(TestVectorReserving)
template <class Vector>
void TestVectorShrinkToFit(void)
{
typedef typename Vector::value_type T;
Vector v;
v.reserve(200);
ASSERT_GEQUAL(v.capacity(), 200lu);
v.push_back(1);
v.push_back(2);
v.push_back(3);
v.shrink_to_fit();
ASSERT_EQUAL(T(1), v[0]);
ASSERT_EQUAL(T(2), v[1]);
ASSERT_EQUAL(T(3), v[2]);
ASSERT_EQUAL(3lu, v.size());
ASSERT_EQUAL(3lu, v.capacity());
}
DECLARE_VECTOR_UNITTEST(TestVectorShrinkToFit)
template <int N>
struct LargeStruct
{
int data[N];
__host__ __device__
bool operator==(const LargeStruct & ls) const
{
for (int i = 0; i < N; i++)
if (data[i] != ls.data[i])
return false;
return true;
}
};
void TestVectorContainingLargeType(void)
{
// Thrust issue #5
// http://code.google.com/p/thrust/issues/detail?id=5
const static int N = 100;
typedef LargeStruct<N> T;
thrust::device_vector<T> dv1;
thrust::host_vector<T> hv1;
ASSERT_EQUAL_QUIET(dv1, hv1);
thrust::device_vector<T> dv2(20);
thrust::host_vector<T> hv2(20);
ASSERT_EQUAL_QUIET(dv2, hv2);
// initialize first element to something nonzero
T ls;
for (int i = 0; i < N; i++)
ls.data[i] = i;
thrust::device_vector<T> dv3(20, ls);
thrust::host_vector<T> hv3(20, ls);
ASSERT_EQUAL_QUIET(dv3, hv3);
// change first element
ls.data[0] = -13;
dv3[2] = ls;
hv3[2] = ls;
ASSERT_EQUAL_QUIET(dv3, hv3);
}
DECLARE_UNITTEST(TestVectorContainingLargeType);
template <typename Vector>
void TestVectorReversed(void)
{
Vector v(3);
v[0] = 0; v[1] = 1; v[2] = 2;
ASSERT_EQUAL(3, v.rend() - v.rbegin());
ASSERT_EQUAL(3, static_cast<const Vector&>(v).rend() - static_cast<const Vector&>(v).rbegin());
ASSERT_EQUAL(3, v.crend() - v.crbegin());
ASSERT_EQUAL(2, *v.rbegin());
ASSERT_EQUAL(2, *static_cast<const Vector&>(v).rbegin());
ASSERT_EQUAL(2, *v.crbegin());
ASSERT_EQUAL(1, *(v.rbegin() + 1));
ASSERT_EQUAL(0, *(v.rbegin() + 2));
ASSERT_EQUAL(0, *(v.rend() - 1));
ASSERT_EQUAL(1, *(v.rend() - 2));
}
DECLARE_VECTOR_UNITTEST(TestVectorReversed);
#if __cplusplus >= 201103L
template <class Vector>
void TestVectorMove(void)
{
//test move construction
Vector v1(3);
v1[0] = 0; v1[1] = 1; v1[2] = 2;
const auto ptr1 = v1.data();
const auto size1 = v1.size();
Vector v2(std::move(v1));
const auto ptr2 = v2.data();
const auto size2 = v2.size();
// ensure v1 was left empty
ASSERT_EQUAL(true, v1.empty());
// ensure v2 received the data from before
ASSERT_EQUAL(v2[0], 0);
ASSERT_EQUAL(v2[1], 1);
ASSERT_EQUAL(v2[2], 2);
ASSERT_EQUAL(size1, size2);
// ensure v2 received the pointer from before
ASSERT_EQUAL(ptr1, ptr2);
//test move assignment
Vector v3(3);
v3[0] = 3; v3[1] = 4; v3[2] = 5;
const auto ptr3 = v3.data();
const auto size3 = v3.size();
v2 = std::move(v3);
const auto ptr4 = v2.data();
const auto size4 = v2.size();
// ensure v3 was left empty
ASSERT_EQUAL(true, v3.empty());
// ensure v2 received the data from before
ASSERT_EQUAL(v2[0], 3);
ASSERT_EQUAL(v2[1], 4);
ASSERT_EQUAL(v2[2], 5);
ASSERT_EQUAL(size3, size4);
// ensure v2 received the pointer from before
ASSERT_EQUAL(ptr3, ptr4);
}
DECLARE_VECTOR_UNITTEST(TestVectorMove);
#endif
| c433643545159c15b340c05c07e558d544277c78.cu | #include <unittest/unittest.h>
#include <thrust/sequence.h>
#include <thrust/device_malloc_allocator.h>
#include <vector>
#include <list>
#include <limits>
#include <utility>
template <class Vector>
void TestVectorZeroSize(void)
{
Vector v;
ASSERT_EQUAL(v.size(), 0lu);
ASSERT_EQUAL((v.begin() == v.end()), true);
}
DECLARE_VECTOR_UNITTEST(TestVectorZeroSize);
void TestVectorBool(void)
{
thrust::host_vector<bool> h(3);
thrust::device_vector<bool> d(3);
h[0] = true; h[1] = false; h[2] = true;
d[0] = true; d[1] = false; d[2] = true;
ASSERT_EQUAL(h[0], true);
ASSERT_EQUAL(h[1], false);
ASSERT_EQUAL(h[2], true);
ASSERT_EQUAL(d[0], true);
ASSERT_EQUAL(d[1], false);
ASSERT_EQUAL(d[2], true);
}
DECLARE_UNITTEST(TestVectorBool);
template <class Vector>
void TestVectorFrontBack(void)
{
typedef typename Vector::value_type T;
Vector v(3);
v[0] = 0; v[1] = 1; v[2] = 2;
ASSERT_EQUAL(v.front(), T(0));
ASSERT_EQUAL(v.back(), T(2));
}
DECLARE_VECTOR_UNITTEST(TestVectorFrontBack);
template <class Vector>
void TestVectorData(void)
{
typedef typename Vector::pointer PointerT;
typedef typename Vector::const_pointer PointerConstT;
Vector v(3);
v[0] = 0; v[1] = 1; v[2] = 2;
ASSERT_EQUAL(0, *v.data());
ASSERT_EQUAL(1, *(v.data() + 1));
ASSERT_EQUAL(2, *(v.data() + 2));
ASSERT_EQUAL(PointerT(&v.front()), v.data());
ASSERT_EQUAL(PointerT(&*v.begin()), v.data());
ASSERT_EQUAL(PointerT(&v[0]), v.data());
const Vector &c_v = v;
ASSERT_EQUAL(0, *c_v.data());
ASSERT_EQUAL(1, *(c_v.data() + 1));
ASSERT_EQUAL(2, *(c_v.data() + 2));
ASSERT_EQUAL(PointerConstT(&c_v.front()), c_v.data());
ASSERT_EQUAL(PointerConstT(&*c_v.begin()), c_v.data());
ASSERT_EQUAL(PointerConstT(&c_v[0]), c_v.data());
}
DECLARE_VECTOR_UNITTEST(TestVectorData);
template <class Vector>
void TestVectorElementAssignment(void)
{
Vector v(3);
v[0] = 0; v[1] = 1; v[2] = 2;
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
v[0] = 10; v[1] = 11; v[2] = 12;
ASSERT_EQUAL(v[0], 10);
ASSERT_EQUAL(v[1], 11);
ASSERT_EQUAL(v[2], 12);
Vector w(3);
w[0] = v[0];
w[1] = v[1];
w[2] = v[2];
ASSERT_EQUAL(v, w);
}
DECLARE_VECTOR_UNITTEST(TestVectorElementAssignment);
template <class Vector>
void TestVectorFromSTLVector(void)
{
typedef typename Vector::value_type T;
std::vector<T> stl_vector(3);
stl_vector[0] = 0;
stl_vector[1] = 1;
stl_vector[2] = 2;
thrust::host_vector<T> v(stl_vector);
ASSERT_EQUAL(v.size(), 3lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
v = stl_vector;
ASSERT_EQUAL(v.size(), 3lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
}
DECLARE_VECTOR_UNITTEST(TestVectorFromSTLVector);
template <class Vector>
void TestVectorFillAssign(void)
{
typedef typename Vector::value_type T;
thrust::host_vector<T> v;
v.assign(3, 13);
ASSERT_EQUAL(v.size(), 3lu);
ASSERT_EQUAL(v[0], 13);
ASSERT_EQUAL(v[1], 13);
ASSERT_EQUAL(v[2], 13);
}
DECLARE_VECTOR_UNITTEST(TestVectorFillAssign);
template <class Vector>
void TestVectorAssignFromSTLVector(void)
{
typedef typename Vector::value_type T;
std::vector<T> stl_vector(3);
stl_vector[0] = 0;
stl_vector[1] = 1;
stl_vector[2] = 2;
thrust::host_vector<T> v;
v.assign(stl_vector.begin(), stl_vector.end());
ASSERT_EQUAL(v.size(), 3lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
}
DECLARE_VECTOR_UNITTEST(TestVectorAssignFromSTLVector);
template <class Vector>
void TestVectorFromBiDirectionalIterator(void)
{
typedef typename Vector::value_type T;
std::list<T> stl_list;
stl_list.push_back(0);
stl_list.push_back(1);
stl_list.push_back(2);
Vector v(stl_list.begin(), stl_list.end());
ASSERT_EQUAL(v.size(), 3lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
}
DECLARE_VECTOR_UNITTEST(TestVectorFromBiDirectionalIterator);
template <class Vector>
void TestVectorAssignFromBiDirectionalIterator(void)
{
typedef typename Vector::value_type T;
std::list<T> stl_list;
stl_list.push_back(0);
stl_list.push_back(1);
stl_list.push_back(2);
Vector v;
v.assign(stl_list.begin(), stl_list.end());
ASSERT_EQUAL(v.size(), 3lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
}
DECLARE_VECTOR_UNITTEST(TestVectorAssignFromBiDirectionalIterator);
template <class Vector>
void TestVectorAssignFromHostVector(void)
{
typedef typename Vector::value_type T;
thrust::host_vector<T> h(3);
h[0] = 0;
h[1] = 1;
h[2] = 2;
Vector v;
v.assign(h.begin(), h.end());
ASSERT_EQUAL(v, h);
}
DECLARE_VECTOR_UNITTEST(TestVectorAssignFromHostVector);
template <class Vector>
void TestVectorToAndFromHostVector(void)
{
typedef typename Vector::value_type T;
thrust::host_vector<T> h(3);
h[0] = 0;
h[1] = 1;
h[2] = 2;
Vector v(h);
ASSERT_EQUAL(v, h);
THRUST_DISABLE_CLANG_SELF_ASSIGNMENT_WARNING(v = v);
ASSERT_EQUAL(v, h);
v[0] = 10;
v[1] = 11;
v[2] = 12;
ASSERT_EQUAL(h[0], 0); ASSERT_EQUAL(v[0], 10);
ASSERT_EQUAL(h[1], 1); ASSERT_EQUAL(v[1], 11);
ASSERT_EQUAL(h[2], 2); ASSERT_EQUAL(v[2], 12);
h = v;
ASSERT_EQUAL(v, h);
h[1] = 11;
v = h;
ASSERT_EQUAL(v, h);
}
DECLARE_VECTOR_UNITTEST(TestVectorToAndFromHostVector);
template <class Vector>
void TestVectorAssignFromDeviceVector(void)
{
typedef typename Vector::value_type T;
thrust::device_vector<T> d(3);
d[0] = 0;
d[1] = 1;
d[2] = 2;
Vector v;
v.assign(d.begin(), d.end());
ASSERT_EQUAL(v, d);
}
DECLARE_VECTOR_UNITTEST(TestVectorAssignFromDeviceVector);
template <class Vector>
void TestVectorToAndFromDeviceVector(void)
{
typedef typename Vector::value_type T;
thrust::device_vector<T> h(3);
h[0] = 0;
h[1] = 1;
h[2] = 2;
Vector v(h);
ASSERT_EQUAL(v, h);
THRUST_DISABLE_CLANG_SELF_ASSIGNMENT_WARNING(v = v);
ASSERT_EQUAL(v, h);
v[0] = 10;
v[1] = 11;
v[2] = 12;
ASSERT_EQUAL(h[0], 0); ASSERT_EQUAL(v[0], 10);
ASSERT_EQUAL(h[1], 1); ASSERT_EQUAL(v[1], 11);
ASSERT_EQUAL(h[2], 2); ASSERT_EQUAL(v[2], 12);
h = v;
ASSERT_EQUAL(v, h);
h[1] = 11;
v = h;
ASSERT_EQUAL(v, h);
}
DECLARE_VECTOR_UNITTEST(TestVectorToAndFromDeviceVector);
template <class Vector>
void TestVectorWithInitialValue(void)
{
typedef typename Vector::value_type T;
const T init = 17;
Vector v(3, init);
ASSERT_EQUAL(v.size(), 3lu);
ASSERT_EQUAL(v[0], init);
ASSERT_EQUAL(v[1], init);
ASSERT_EQUAL(v[2], init);
}
DECLARE_VECTOR_UNITTEST(TestVectorWithInitialValue);
template <class Vector>
void TestVectorSwap(void)
{
Vector v(3);
v[0] = 0; v[1] = 1; v[2] = 2;
Vector u(3);
u[0] = 10; u[1] = 11; u[2] = 12;
v.swap(u);
ASSERT_EQUAL(v[0], 10); ASSERT_EQUAL(u[0], 0);
ASSERT_EQUAL(v[1], 11); ASSERT_EQUAL(u[1], 1);
ASSERT_EQUAL(v[2], 12); ASSERT_EQUAL(u[2], 2);
}
DECLARE_VECTOR_UNITTEST(TestVectorSwap);
template <class Vector>
void TestVectorErasePosition(void)
{
Vector v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
v.erase(v.begin() + 2);
ASSERT_EQUAL(v.size(), 4lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 3);
ASSERT_EQUAL(v[3], 4);
v.erase(v.begin() + 0);
ASSERT_EQUAL(v.size(), 3lu);
ASSERT_EQUAL(v[0], 1);
ASSERT_EQUAL(v[1], 3);
ASSERT_EQUAL(v[2], 4);
v.erase(v.begin() + 2);
ASSERT_EQUAL(v.size(), 2lu);
ASSERT_EQUAL(v[0], 1);
ASSERT_EQUAL(v[1], 3);
v.erase(v.begin() + 1);
ASSERT_EQUAL(v.size(), 1lu);
ASSERT_EQUAL(v[0], 1);
v.erase(v.begin() + 0);
ASSERT_EQUAL(v.size(), 0lu);
}
DECLARE_VECTOR_UNITTEST(TestVectorErasePosition);
template <class Vector>
void TestVectorEraseRange(void)
{
Vector v(6);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4; v[5] = 5;
v.erase(v.begin() + 1, v.begin() + 3);
ASSERT_EQUAL(v.size(), 4lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 3);
ASSERT_EQUAL(v[2], 4);
ASSERT_EQUAL(v[3], 5);
v.erase(v.begin() + 2, v.end());
ASSERT_EQUAL(v.size(), 2lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 3);
v.erase(v.begin() + 0, v.begin() + 1);
ASSERT_EQUAL(v.size(), 1lu);
ASSERT_EQUAL(v[0], 3);
v.erase(v.begin(), v.end());
ASSERT_EQUAL(v.size(), 0lu);
}
DECLARE_VECTOR_UNITTEST(TestVectorEraseRange);
void TestVectorEquality(void)
{
thrust::host_vector<int> h_a(3);
thrust::host_vector<int> h_b(3);
thrust::host_vector<int> h_c(3);
h_a[0] = 0; h_a[1] = 1; h_a[2] = 2;
h_b[0] = 0; h_b[1] = 1; h_b[2] = 3;
h_c[0] = 0; h_c[1] = 1;
thrust::device_vector<int> d_a(3);
thrust::device_vector<int> d_b(3);
thrust::device_vector<int> d_c(3);
d_a[0] = 0; d_a[1] = 1; d_a[2] = 2;
d_b[0] = 0; d_b[1] = 1; d_b[2] = 3;
d_c[0] = 0; d_c[1] = 1;
std::vector<int> s_a(3);
std::vector<int> s_b(3);
std::vector<int> s_c(3);
s_a[0] = 0; s_a[1] = 1; s_a[2] = 2;
s_b[0] = 0; s_b[1] = 1; s_b[2] = 3;
s_c[0] = 0; s_c[1] = 1;
ASSERT_EQUAL((h_a == h_a), true); ASSERT_EQUAL((h_a == d_a), true); ASSERT_EQUAL((d_a == h_a), true); ASSERT_EQUAL((d_a == d_a), true);
ASSERT_EQUAL((h_b == h_b), true); ASSERT_EQUAL((h_b == d_b), true); ASSERT_EQUAL((d_b == h_b), true); ASSERT_EQUAL((d_b == d_b), true);
ASSERT_EQUAL((h_c == h_c), true); ASSERT_EQUAL((h_c == d_c), true); ASSERT_EQUAL((d_c == h_c), true); ASSERT_EQUAL((d_c == d_c), true);
// test vector vs device_vector
ASSERT_EQUAL((s_a == d_a), true); ASSERT_EQUAL((d_a == s_a), true);
ASSERT_EQUAL((s_b == d_b), true); ASSERT_EQUAL((d_b == s_b), true);
ASSERT_EQUAL((s_c == d_c), true); ASSERT_EQUAL((d_c == s_c), true);
// test vector vs host_vector
ASSERT_EQUAL((s_a == h_a), true); ASSERT_EQUAL((h_a == s_a), true);
ASSERT_EQUAL((s_b == h_b), true); ASSERT_EQUAL((h_b == s_b), true);
ASSERT_EQUAL((s_c == h_c), true); ASSERT_EQUAL((h_c == s_c), true);
ASSERT_EQUAL((h_a == h_b), false); ASSERT_EQUAL((h_a == d_b), false); ASSERT_EQUAL((d_a == h_b), false); ASSERT_EQUAL((d_a == d_b), false);
ASSERT_EQUAL((h_b == h_a), false); ASSERT_EQUAL((h_b == d_a), false); ASSERT_EQUAL((d_b == h_a), false); ASSERT_EQUAL((d_b == d_a), false);
ASSERT_EQUAL((h_a == h_c), false); ASSERT_EQUAL((h_a == d_c), false); ASSERT_EQUAL((d_a == h_c), false); ASSERT_EQUAL((d_a == d_c), false);
ASSERT_EQUAL((h_c == h_a), false); ASSERT_EQUAL((h_c == d_a), false); ASSERT_EQUAL((d_c == h_a), false); ASSERT_EQUAL((d_c == d_a), false);
ASSERT_EQUAL((h_b == h_c), false); ASSERT_EQUAL((h_b == d_c), false); ASSERT_EQUAL((d_b == h_c), false); ASSERT_EQUAL((d_b == d_c), false);
ASSERT_EQUAL((h_c == h_b), false); ASSERT_EQUAL((h_c == d_b), false); ASSERT_EQUAL((d_c == h_b), false); ASSERT_EQUAL((d_c == d_b), false);
// test vector vs device_vector
ASSERT_EQUAL((s_a == d_b), false); ASSERT_EQUAL((d_a == s_b), false);
ASSERT_EQUAL((s_b == d_a), false); ASSERT_EQUAL((d_b == s_a), false);
ASSERT_EQUAL((s_a == d_c), false); ASSERT_EQUAL((d_a == s_c), false);
ASSERT_EQUAL((s_c == d_a), false); ASSERT_EQUAL((d_c == s_a), false);
ASSERT_EQUAL((s_b == d_c), false); ASSERT_EQUAL((d_b == s_c), false);
ASSERT_EQUAL((s_c == d_b), false); ASSERT_EQUAL((d_c == s_b), false);
// test vector vs host_vector
ASSERT_EQUAL((s_a == h_b), false); ASSERT_EQUAL((h_a == s_b), false);
ASSERT_EQUAL((s_b == h_a), false); ASSERT_EQUAL((h_b == s_a), false);
ASSERT_EQUAL((s_a == h_c), false); ASSERT_EQUAL((h_a == s_c), false);
ASSERT_EQUAL((s_c == h_a), false); ASSERT_EQUAL((h_c == s_a), false);
ASSERT_EQUAL((s_b == h_c), false); ASSERT_EQUAL((h_b == s_c), false);
ASSERT_EQUAL((s_c == h_b), false); ASSERT_EQUAL((h_c == s_b), false);
}
DECLARE_UNITTEST(TestVectorEquality);
void TestVectorInequality(void)
{
thrust::host_vector<int> h_a(3);
thrust::host_vector<int> h_b(3);
thrust::host_vector<int> h_c(3);
h_a[0] = 0; h_a[1] = 1; h_a[2] = 2;
h_b[0] = 0; h_b[1] = 1; h_b[2] = 3;
h_c[0] = 0; h_c[1] = 1;
thrust::device_vector<int> d_a(3);
thrust::device_vector<int> d_b(3);
thrust::device_vector<int> d_c(3);
d_a[0] = 0; d_a[1] = 1; d_a[2] = 2;
d_b[0] = 0; d_b[1] = 1; d_b[2] = 3;
d_c[0] = 0; d_c[1] = 1;
std::vector<int> s_a(3);
std::vector<int> s_b(3);
std::vector<int> s_c(3);
s_a[0] = 0; s_a[1] = 1; s_a[2] = 2;
s_b[0] = 0; s_b[1] = 1; s_b[2] = 3;
s_c[0] = 0; s_c[1] = 1;
ASSERT_EQUAL((h_a != h_a), false); ASSERT_EQUAL((h_a != d_a), false); ASSERT_EQUAL((d_a != h_a), false); ASSERT_EQUAL((d_a != d_a), false);
ASSERT_EQUAL((h_b != h_b), false); ASSERT_EQUAL((h_b != d_b), false); ASSERT_EQUAL((d_b != h_b), false); ASSERT_EQUAL((d_b != d_b), false);
ASSERT_EQUAL((h_c != h_c), false); ASSERT_EQUAL((h_c != d_c), false); ASSERT_EQUAL((d_c != h_c), false); ASSERT_EQUAL((d_c != d_c), false);
// test vector vs device_vector
ASSERT_EQUAL((s_a != d_a), false); ASSERT_EQUAL((d_a != s_a), false);
ASSERT_EQUAL((s_b != d_b), false); ASSERT_EQUAL((d_b != s_b), false);
ASSERT_EQUAL((s_c != d_c), false); ASSERT_EQUAL((d_c != s_c), false);
// test vector vs host_vector
ASSERT_EQUAL((s_a != h_a), false); ASSERT_EQUAL((h_a != s_a), false);
ASSERT_EQUAL((s_b != h_b), false); ASSERT_EQUAL((h_b != s_b), false);
ASSERT_EQUAL((s_c != h_c), false); ASSERT_EQUAL((h_c != s_c), false);
ASSERT_EQUAL((h_a != h_b), true); ASSERT_EQUAL((h_a != d_b), true); ASSERT_EQUAL((d_a != h_b), true); ASSERT_EQUAL((d_a != d_b), true);
ASSERT_EQUAL((h_b != h_a), true); ASSERT_EQUAL((h_b != d_a), true); ASSERT_EQUAL((d_b != h_a), true); ASSERT_EQUAL((d_b != d_a), true);
ASSERT_EQUAL((h_a != h_c), true); ASSERT_EQUAL((h_a != d_c), true); ASSERT_EQUAL((d_a != h_c), true); ASSERT_EQUAL((d_a != d_c), true);
ASSERT_EQUAL((h_c != h_a), true); ASSERT_EQUAL((h_c != d_a), true); ASSERT_EQUAL((d_c != h_a), true); ASSERT_EQUAL((d_c != d_a), true);
ASSERT_EQUAL((h_b != h_c), true); ASSERT_EQUAL((h_b != d_c), true); ASSERT_EQUAL((d_b != h_c), true); ASSERT_EQUAL((d_b != d_c), true);
ASSERT_EQUAL((h_c != h_b), true); ASSERT_EQUAL((h_c != d_b), true); ASSERT_EQUAL((d_c != h_b), true); ASSERT_EQUAL((d_c != d_b), true);
// test vector vs device_vector
ASSERT_EQUAL((s_a != d_b), true); ASSERT_EQUAL((d_a != s_b), true);
ASSERT_EQUAL((s_b != d_a), true); ASSERT_EQUAL((d_b != s_a), true);
ASSERT_EQUAL((s_a != d_c), true); ASSERT_EQUAL((d_a != s_c), true);
ASSERT_EQUAL((s_c != d_a), true); ASSERT_EQUAL((d_c != s_a), true);
ASSERT_EQUAL((s_b != d_c), true); ASSERT_EQUAL((d_b != s_c), true);
ASSERT_EQUAL((s_c != d_b), true); ASSERT_EQUAL((d_c != s_b), true);
// test vector vs host_vector
ASSERT_EQUAL((s_a != h_b), true); ASSERT_EQUAL((h_a != s_b), true);
ASSERT_EQUAL((s_b != h_a), true); ASSERT_EQUAL((h_b != s_a), true);
ASSERT_EQUAL((s_a != h_c), true); ASSERT_EQUAL((h_a != s_c), true);
ASSERT_EQUAL((s_c != h_a), true); ASSERT_EQUAL((h_c != s_a), true);
ASSERT_EQUAL((s_b != h_c), true); ASSERT_EQUAL((h_b != s_c), true);
ASSERT_EQUAL((s_c != h_b), true); ASSERT_EQUAL((h_c != s_b), true);
}
DECLARE_UNITTEST(TestVectorInequality);
template <class Vector>
void TestVectorResizing(void)
{
Vector v;
v.resize(3);
ASSERT_EQUAL(v.size(), 3lu);
v[0] = 0; v[1] = 1; v[2] = 2;
v.resize(5);
ASSERT_EQUAL(v.size(), 5lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
v[3] = 3; v[4] = 4;
v.resize(4);
ASSERT_EQUAL(v.size(), 4lu);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
ASSERT_EQUAL(v[3], 3);
v.resize(0);
ASSERT_EQUAL(v.size(), 0lu);
// TODO remove this WAR
#if defined(__CUDACC__) && CUDART_VERSION==3000
// depending on sizeof(T), we will receive one
// of two possible exceptions
try
{
v.resize(std::numeric_limits<size_t>::max());
}
catch(std::length_error e) {}
catch(std::bad_alloc e)
{
// reset the CUDA error
cudaGetLastError();
} // end catch
#endif // defined(__CUDACC__) && CUDART_VERSION==3000
ASSERT_EQUAL(v.size(), 0lu);
}
DECLARE_VECTOR_UNITTEST(TestVectorResizing);
template <class Vector>
void TestVectorReserving(void)
{
Vector v;
v.reserve(3);
ASSERT_GEQUAL(v.capacity(), 3lu);
size_t old_capacity = v.capacity();
v.reserve(0);
ASSERT_EQUAL(v.capacity(), old_capacity);
// TODO remove this WAR
#if defined(__CUDACC__) && CUDART_VERSION==3000
try
{
v.reserve(std::numeric_limits<size_t>::max());
}
catch(std::length_error e) {}
catch(std::bad_alloc e) {}
#endif // defined(__CUDACC__) && CUDART_VERSION==3000
ASSERT_EQUAL(v.capacity(), old_capacity);
}
DECLARE_VECTOR_UNITTEST(TestVectorReserving)
template <class Vector>
void TestVectorShrinkToFit(void)
{
typedef typename Vector::value_type T;
Vector v;
v.reserve(200);
ASSERT_GEQUAL(v.capacity(), 200lu);
v.push_back(1);
v.push_back(2);
v.push_back(3);
v.shrink_to_fit();
ASSERT_EQUAL(T(1), v[0]);
ASSERT_EQUAL(T(2), v[1]);
ASSERT_EQUAL(T(3), v[2]);
ASSERT_EQUAL(3lu, v.size());
ASSERT_EQUAL(3lu, v.capacity());
}
DECLARE_VECTOR_UNITTEST(TestVectorShrinkToFit)
template <int N>
struct LargeStruct
{
int data[N];
__host__ __device__
bool operator==(const LargeStruct & ls) const
{
for (int i = 0; i < N; i++)
if (data[i] != ls.data[i])
return false;
return true;
}
};
void TestVectorContainingLargeType(void)
{
// Thrust issue #5
// http://code.google.com/p/thrust/issues/detail?id=5
const static int N = 100;
typedef LargeStruct<N> T;
thrust::device_vector<T> dv1;
thrust::host_vector<T> hv1;
ASSERT_EQUAL_QUIET(dv1, hv1);
thrust::device_vector<T> dv2(20);
thrust::host_vector<T> hv2(20);
ASSERT_EQUAL_QUIET(dv2, hv2);
// initialize the struct to a known, mostly nonzero pattern
T ls;
for (int i = 0; i < N; i++)
ls.data[i] = i;
thrust::device_vector<T> dv3(20, ls);
thrust::host_vector<T> hv3(20, ls);
ASSERT_EQUAL_QUIET(dv3, hv3);
// change the struct's first element and store it at index 2
ls.data[0] = -13;
dv3[2] = ls;
hv3[2] = ls;
ASSERT_EQUAL_QUIET(dv3, hv3);
}
DECLARE_UNITTEST(TestVectorContainingLargeType);
template <typename Vector>
void TestVectorReversed(void)
{
Vector v(3);
v[0] = 0; v[1] = 1; v[2] = 2;
ASSERT_EQUAL(3, v.rend() - v.rbegin());
ASSERT_EQUAL(3, static_cast<const Vector&>(v).rend() - static_cast<const Vector&>(v).rbegin());
ASSERT_EQUAL(3, v.crend() - v.crbegin());
ASSERT_EQUAL(2, *v.rbegin());
ASSERT_EQUAL(2, *static_cast<const Vector&>(v).rbegin());
ASSERT_EQUAL(2, *v.crbegin());
ASSERT_EQUAL(1, *(v.rbegin() + 1));
ASSERT_EQUAL(0, *(v.rbegin() + 2));
ASSERT_EQUAL(0, *(v.rend() - 1));
ASSERT_EQUAL(1, *(v.rend() - 2));
}
DECLARE_VECTOR_UNITTEST(TestVectorReversed);
#if __cplusplus >= 201103L
template <class Vector>
void TestVectorMove(void)
{
// test move construction
Vector v1(3);
v1[0] = 0; v1[1] = 1; v1[2] = 2;
const auto ptr1 = v1.data();
const auto size1 = v1.size();
Vector v2(std::move(v1));
const auto ptr2 = v2.data();
const auto size2 = v2.size();
// ensure v1 was left empty
ASSERT_EQUAL(true, v1.empty());
// ensure v2 received the data from before
ASSERT_EQUAL(v2[0], 0);
ASSERT_EQUAL(v2[1], 1);
ASSERT_EQUAL(v2[2], 2);
ASSERT_EQUAL(size1, size2);
// ensure v2 received the pointer from before
ASSERT_EQUAL(ptr1, ptr2);
// test move assignment
Vector v3(3);
v3[0] = 3; v3[1] = 4; v3[2] = 5;
const auto ptr3 = v3.data();
const auto size3 = v3.size();
v2 = std::move(v3);
const auto ptr4 = v2.data();
const auto size4 = v2.size();
// ensure v3 was left empty
ASSERT_EQUAL(true, v3.empty());
// ensure v2 received the data from before
ASSERT_EQUAL(v2[0], 3);
ASSERT_EQUAL(v2[1], 4);
ASSERT_EQUAL(v2[2], 5);
ASSERT_EQUAL(size3, size4);
// ensure v2 received the pointer from before
ASSERT_EQUAL(ptr3, ptr4);
}
DECLARE_VECTOR_UNITTEST(TestVectorMove);
#endif
|
e8f63b83f5ea71140c87f57b560ab288fa1669f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Implements the math functions for GPU.
#include "caffe2/utils/math.h"
#include <cstring>
#include <limits>
#include <numeric>
#include <vector>
#include <hipcub/hipcub.hpp>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/fixed_divisor.h"
#include "caffe2/utils/math_utils.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
namespace {
#define DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Func, expr) \
template <typename T> \
struct Func##Functor { \
inline __host__ __device__ T \
operator()(const T& lhs, const T& rhs) const { \
return lhs expr rhs; \
} \
}; \
template <> \
struct Func##Functor<at::Half> { \
inline __host__ __device__ at::Half \
operator()(const at::Half& lhs, const at::Half& rhs) const { \
return convert::To<float, at::Half>(convert::To<at::Half, float>( \
lhs) expr convert::To<at::Half, float>(rhs)); \
} \
};
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Add, +)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Sub, -)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Mul, *)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Div, /)
#undef DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR
template <typename T>
__global__ void SinCosCUDAKernel(const int N, const T* X, T* S, T* C) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
sincos(__ldg(X + i), S + i, C + i);
#else
sincos(X[i], S + i, C + i);
#endif
}
}
template <typename TIn, typename TOut, class BinaryOperator>
__global__ void SimpleBinaryOpCUDAKernel(
const int N,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(i, N) {
C[i] = op(A[i], B[i]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void RowwiseBinaryOpCUDAKenel(
const int size,
const FixedDivisor<int> cols,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
const int j = cols.Mod(C_index);
const int A_index = broadcast_1st ? j : C_index;
const int B_index = broadcast_1st ? C_index : j;
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void ColwiseBinaryOpCUDAKenel(
const int size,
const FixedDivisor<int> cols,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
const int i = cols.Div(C_index);
const int A_index = broadcast_1st ? i : C_index;
const int B_index = broadcast_1st ? C_index : i;
C[C_index] = op(A[A_index], B[B_index]);
}
}
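// Illustrative sketch of the index math used by the two kernels above: with a
// rows x cols output and, say, cols = 4, FixedDivisor<int> cols_div(4) maps a
// flat index C_index = 10 to j = cols_div.Mod(10) = 2 (column) and
// i = cols_div.Div(10) = 2 (row). The rowwise kernel indexes the broadcast
// operand with j only, the colwise kernel with i only, while the other operand
// always uses the full C_index.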
template <typename TIn, typename TOut, class BinaryOperator, int D>
__global__ void BroadcastBinaryOpCUDAKernel(
const int size,
const SimpleArray<int, D> A_strides,
const SimpleArray<int, D> B_strides,
const SimpleArray<FixedDivisor<int>, D> C_dims,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
int A_index = 0;
int B_index = 0;
int C_index_val = C_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
C_dims.data[i].DivMod(C_index_val, &C_index_val, &d);
A_index += d * A_strides.data[i];
B_index += d * B_strides.data[i];
}
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BinaryOpWith2DBroadcasting(
const int rows,
const int cols,
const bool rowwise_broadcast,
const bool broadcast_1st,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
if (rows == 0 || cols == 0) {
return;
}
const int size = rows * cols;
const FixedDivisor<int> cols_div(cols);
if (rowwise_broadcast) {
if (broadcast_1st) {
hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, cols_div, op, A, B, C);
} else {
hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, cols_div, op, A, B, C);
}
} else {
if (broadcast_1st) {
hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, cols_div, op, A, B, C);
} else {
hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, cols_div, op, A, B, C);
}
}
}
template <typename TIn, typename TOut, class BinaryOperator, int D>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOpImpl(
const int* A_dims,
const int* B_dims,
const int* C_dims,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
SimpleArray<int, D> A_strides_array;
SimpleArray<int, D> B_strides_array;
SimpleArray<FixedDivisor<int>, D> C_dims_array;
int A_stride = 1;
int B_stride = 1;
for (int i = D - 1; i >= 0; --i) {
if (C_dims[i] == 0) {
return;
}
A_strides_array.data[i] = A_dims[i] == 1 ? 0 : A_stride;
B_strides_array.data[i] = B_dims[i] == 1 ? 0 : B_stride;
A_stride *= A_dims[i];
B_stride *= B_dims[i];
C_dims_array.data[i] = FixedDivisor<int>(C_dims[i]);
}
const int size =
std::accumulate(C_dims, C_dims + D, 1, std::multiplies<int>());
hipLaunchKernelGGL(( BroadcastBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, D>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
size, A_strides_array, B_strides_array, C_dims_array, op, A, B, C);
}
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOp(
const int A_ndim,
const int* A_dims,
const int B_ndim,
const int* B_dims,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
const int ndim = std::max(A_ndim, B_ndim);
std::vector<int> A_dims_array(ndim);
std::vector<int> B_dims_array(ndim);
std::vector<int> C_dims_array(ndim);
utils::ComputeBroadcastBinaryOpDims(
A_ndim,
A_dims,
B_ndim,
B_dims,
A_dims_array.data(),
B_dims_array.data(),
C_dims_array.data());
if (A_dims_array == B_dims_array) {
const int size = std::accumulate(
C_dims_array.cbegin(), C_dims_array.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( SimpleBinaryOpCUDAKernel<TIn, TOut, BinaryOperator>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, op, A, B, C);
return;
}
int rows;
int cols;
bool broadcast_1st;
if (utils::IsRowwiseBroadcastBinaryOp(
ndim,
A_dims_array.data(),
B_dims_array.data(),
&rows,
&cols,
&broadcast_1st)) {
BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
rows, cols, true, broadcast_1st, op, A, B, C, context);
return;
}
if (utils::IsColwiseBroadcastBinaryOp(
ndim,
A_dims_array.data(),
B_dims_array.data(),
&rows,
&cols,
&broadcast_1st)) {
BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
rows, cols, false, broadcast_1st, op, A, B, C, context);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3(
ndim,
BroadcastBinaryOpImpl,
TIn,
TOut,
BinaryOperator,
A_dims_array.data(),
B_dims_array.data(),
C_dims_array.data(),
op,
A,
B,
C,
context);
}
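// Worked example of the stride trick in BroadcastBinaryOpImpl above, assuming
// A_dims = {3, 1} and B_dims = {1, 4} so that C_dims = {3, 4}:
//
//   A_strides = {1, 0};   // the size-1 axis of A contributes nothing
//   B_strides = {0, 1};   // the size-1 axis of B contributes nothing
//
// For C_index = 7, DivMod over C_dims yields (i, j) = (1, 3), so the kernel
// reads A[1 * 1 + 3 * 0] = A[1] and B[1 * 0 + 3 * 1] = B[3]; zeroing the
// stride of every size-1 dimension is what implements the broadcast.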
} // namespace
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Func, op) \
__global__ void Func##CUDAKernel(const int N, const T* X, T* Y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
Y[i] = op(X[i]); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>( \
const int N, const T* x, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( Func##CUDAKernel), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, x, y); \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Acos, acosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Asin, asinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tan, tanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Atan, atanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sinh, sinhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cosh, coshf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tanh, tanhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, utils::Square<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Rsqrt, rsqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cbrt, cbrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cube, utils::Cube<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Cube, utils::Cube<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Cube,
utils::Cube<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Cube,
utils::Cube<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(bool, Not, utils::Not)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Neg, utils::Negate<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Neg, utils::Negate<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Neg,
utils::Negate<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Neg,
utils::Negate<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sign, utils::Sign<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sign, utils::Sign<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Sign,
utils::Sign<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Sign,
utils::Sign<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Inv, utils::Inv<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Inv, utils::Inv<double>)
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
#define CAFFE2_SPECIALIZED_CUDA_SINCOS(T) \
template <> \
CAFFE2_CUDA_EXPORT void SinCos<T, CUDAContext>( \
const int N, const T* x, T* ys, T* yc, CUDAContext* context) { \
hipLaunchKernelGGL(( SinCosCUDAKernel), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, x, ys, yc); \
}
CAFFE2_SPECIALIZED_CUDA_SINCOS(float)
CAFFE2_SPECIALIZED_CUDA_SINCOS(double)
#undef CAFFE2_SPECIALIZED_CUDA_SINCOS
#define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>( \
const int N, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
hipLaunchKernelGGL(( SimpleBinaryOpCUDAKernel<TIn, TOut, Op<TIn>>) \
, dim3(CAFFE_GET_BLOCKS(N)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, Op<TIn>(), A, B, C); \
}
#define DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION
#define DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_SIMPLE_CUDA_BINARY_FUNCTION
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
float,
float,
ElemwiseMax,
thrust::maximum);
#undef DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION
#define DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, true>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FixedDivisor<int> cols_div(cols); \
hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>) \
, dim3(CAFFE_GET_BLOCKS(size)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, false>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FixedDivisor<int> cols_div(cols); \
hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>) \
, dim3(CAFFE_GET_BLOCKS(size)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, true>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FixedDivisor<int> cols_div(cols); \
hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>) \
, dim3(CAFFE_GET_BLOCKS(size)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, false>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FixedDivisor<int> cols_div(cols); \
hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>) \
, dim3(CAFFE_GET_BLOCKS(size)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \
}
#define DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION
#define DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION
#define DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>( \
const int A_ndim, \
const int* A_dims, \
const int B_ndim, \
const int* B_dims, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
BroadcastBinaryOp<TIn, TOut, Op<TIn>>( \
A_ndim, A_dims, B_ndim, B_dims, Op<TIn>(), A, B, C, context); \
}
#define DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION
#define DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION
#define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \
template <> \
CAFFE2_CUDA_EXPORT void Funcname<T, CUDAContext>( \
const int N, \
const T* src, \
T* dst, \
Tensor* scratch_ptr, \
CUDAContext* context) { \
size_t memRequired = 0; \
hipcub::DeviceReduce::func( \
nullptr, memRequired, src, dst, N, context->cuda_stream()); \
auto buffer_size = \
static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T)); \
scratch_ptr->Resize(std::vector<TIndex>{buffer_size}); \
hipcub::DeviceReduce::func( \
static_cast<void*>(scratch_ptr->mutable_data<T>()), \
memRequired, \
src, \
dst, \
N, \
context->cuda_stream()); \
}
DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min)
DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max)
#undef DELEGATE_REDUCTION_FUNCTION
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasSgemm(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
N));
}
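// Illustrative note on the operand swap above: a row-major M x N matrix has
// the same memory layout as a column-major N x M matrix, so calling the
// column-major BLAS gemm with B before A and dimensions (N, M, K) writes C^T
// in column-major form, which is exactly the row-major C the caller expects.
// A minimal usage sketch, assuming d_A, d_B, d_C are contiguous row-major
// device buffers and ctx is a CUDAContext*:
//
//   // C(2x3) = A(2x4) * B(4x3)
//   Gemm<float, CUDAContext>(CblasNoTrans, CblasNoTrans,
//                            /*M=*/2, /*N=*/3, /*K=*/4,
//                            1.0f, d_A, d_B, 0.0f, d_C, ctx);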
template <>
CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const at::Half* B,
const float beta,
at::Half* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemmEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
HIP_R_16F,
ldb,
A,
HIP_R_16F,
lda,
&beta,
C,
HIP_R_16F,
N));
} else if (math_type == TensorProto_DataType_FLOAT16) {
// convert alpha, beta from float -> __half
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
// call hipblasHgemm
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasHgemm(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
(const __half*)A,
lda,
&beta_fp16,
(__half*)C,
N));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
template <>
CAFFE2_CUDA_EXPORT void BiasCHW<float, CUDAContext>(
const float* bias,
const float* bias_multiplier,
const int bias_channels,
const int image_size,
float* image,
CUDAContext* context) {
Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
bias_channels,
image_size,
1,
1,
bias,
bias_multiplier,
1,
image,
context);
}
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float** A,
const float** B,
const float beta,
float** C,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<float, CUDAContext>(
trans_A,
trans_B,
M,
N,
K,
alpha,
A[i],
B[i],
beta,
C[i],
context,
math_type);
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
thrust::device_vector<const float*> A_device(A, A + batch_size);
thrust::device_vector<const float*> B_device(B, B + batch_size);
thrust::device_vector<float*> C_device(C, C + batch_size);
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasSgemmBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B_device.data().get(),
ldb,
A_device.data().get(),
lda,
&beta,
C_device.data().get(),
ldc,
batch_size));
#endif
}
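// Design note: the batched gemm entry point expects the per-matrix pointer
// arrays to reside in device memory, which is why the host arrays A, B and C
// are staged through thrust::device_vector above and the raw device pointers
// are taken with .data().get(). The strided-batched variant below avoids that
// extra transfer when the matrices are laid out at a fixed stride.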
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int A_stride,
const float* B,
const int B_stride,
const float beta,
float* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<float, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
A += A_stride;
B += B_stride;
C += C_stride;
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasSgemmStridedBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
ldb,
B_stride,
A,
lda,
A_stride,
&beta,
C,
ldc,
C_stride,
batch_size));
#endif
}
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const at::Half** A,
const at::Half** B,
const float beta,
at::Half** C,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 9
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A,
trans_B,
M,
N,
K,
alpha,
A[i],
B[i],
beta,
C[i],
context,
math_type);
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
#if TORCH_HIP_VERSION < 9010
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A,
trans_B,
M,
N,
K,
alpha,
A[i],
B[i],
beta,
C[i],
context,
math_type);
}
#else
thrust::device_vector<const void*> A_device(A, A + batch_size);
thrust::device_vector<const void*> B_device(B, B + batch_size);
thrust::device_vector<void*> C_device(C, C + batch_size);
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasGemmBatchedEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B_device.data().get(),
HIP_R_16F,
ldb,
A_device.data().get(),
HIP_R_16F,
lda,
&beta,
C_device.data().get(),
HIP_R_16F,
ldc,
batch_size,
HIP_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#endif
} else if (math_type == TensorProto_DataType_FLOAT16) {
// Convert alpha, beta from float -> __half
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
std::vector<const __half*> A_array(batch_size);
std::vector<const __half*> B_array(batch_size);
std::vector<__half*> C_array(batch_size);
for (int i = 0; i < batch_size; ++i) {
A_array[i] = reinterpret_cast<const __half*>(A[i]);
B_array[i] = reinterpret_cast<const __half*>(B[i]);
C_array[i] = reinterpret_cast<__half*>(C[i]);
}
thrust::device_vector<const __half*> A_device(
A_array.cbegin(), A_array.cend());
thrust::device_vector<const __half*> B_device(
B_array.cbegin(), B_array.cend());
thrust::device_vector<__half*> C_device(C_array.cbegin(), C_array.cend());
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasHgemmBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha_fp16,
B_device.data().get(),
ldb,
A_device.data().get(),
lda,
&beta_fp16,
C_device.data().get(),
ldc,
batch_size));
} else {
CAFFE_THROW("Unsupported math type");
}
#endif
}
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const int A_stride,
const at::Half* B,
const int B_stride,
const float beta,
at::Half* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
A += A_stride;
B += B_stride;
C += C_stride;
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
#if TORCH_HIP_VERSION < 9010
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
A += A_stride;
B += B_stride;
C += C_stride;
}
#else
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasGemmStridedBatchedEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
HIP_R_16F,
ldb,
B_stride,
A,
HIP_R_16F,
lda,
A_stride,
&beta,
C,
HIP_R_16F,
ldc,
C_stride,
batch_size,
HIP_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#endif
} else if (math_type == TensorProto_DataType_FLOAT16) {
// Convert alpha, beta from float -> __half
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasHgemmStridedBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
B_stride,
(const __half*)A,
lda,
A_stride,
&beta_fp16,
(__half*)C,
ldc,
C_stride,
batch_size));
} else {
CAFFE_THROW("Unsupported math type");
}
#endif
}
#if TORCH_HIP_VERSION >= 9000
// No change, but required. Defer to default CUDA engine
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
return Gemm<float, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
}
template <>
CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const at::Half* B,
const float beta,
at::Half* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
// enable TensorCore for this call on this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_TENSOR_OP_MATH));
}
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasGemmEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
HIP_R_16F,
ldb,
A,
HIP_R_16F,
lda,
&beta,
C,
HIP_R_16F,
N,
HIP_R_32F,
CUBLAS_GEMM_DFALT_TENSOR_OP));
// Now disable TensorCore math for subsequent calls to this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_DEFAULT_MATH));
}
}
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int A_stride,
const float* B,
const int B_stride,
const float beta,
float* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
return GemmStridedBatched<float, CUDAContext, DefaultEngine>(
trans_A,
trans_B,
batch_size,
M,
N,
K,
alpha,
A,
A_stride,
B,
B_stride,
beta,
C,
C_stride,
context,
math_type);
}
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<at::Half, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const int A_stride,
const at::Half* B,
const int B_stride,
const float beta,
at::Half* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
return GemmStridedBatched<at::Half, CUDAContext, DefaultEngine>(
trans_A,
trans_B,
batch_size,
M,
N,
K,
alpha,
A,
A_stride,
B,
B_stride,
beta,
C,
C_stride,
context,
math_type);
}
#endif // TORCH_HIP_VERSION >= 9000
template <>
CAFFE2_CUDA_EXPORT void GemmEx<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int lda,
const float* B,
const int ldb,
const float beta,
float* C,
const int ldc,
CUDAContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasSgemm(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
ldc));
}
template <>
CAFFE2_CUDA_EXPORT void Gemv<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const int M,
const int N,
const float alpha,
const float* A,
const float* x,
const float beta,
float* y,
CUDAContext* context,
TensorProto::DataType math_type) {
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasSgemv(
context->cublas_handle(),
cu_trans_A,
N,
M,
&alpha,
A,
N,
x,
1,
&beta,
y,
1));
}
// Batched Add variants
namespace {
template <typename T>
__global__ void AddStripedBatchKernel(
const int N,
const T* first,
T* Y,
const int stripe,
const int batch) {
for (int j = 0; j < batch; j++) {
const T* x = first + j * stripe;
CUDA_1D_KERNEL_LOOP(i, N) {
float tmpY = convert::To<T, float>(Y[i]);
tmpY += convert::To<T, float>(x[i]);
Y[i] = convert::To<float, T>(tmpY);
}
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \
template <> \
CAFFE2_CUDA_EXPORT void AddStripedBatch<T, CUDAContext>( \
const int N, \
const T* first, \
T* Y, \
const int stripe, \
const int batch, \
CUDAContext* context) { \
hipLaunchKernelGGL(( AddStripedBatchKernel<T>) \
, dim3(CAFFE_GET_BLOCKS(N)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, first, Y, stripe, batch); \
}
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float);
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(at::Half);
#undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH
template <>
CAFFE2_CUDA_EXPORT void Gemv<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const int M,
const int N,
const float alpha,
const at::Half* A,
const at::Half* x,
const float beta,
at::Half* y,
CUDAContext* context,
TensorProto::DataType math_type) {
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
// sort out what we need to call cublasSgemmEx / hipblasHgemm
const int m = (cu_trans_A == HIPBLAS_OP_N) ? N : M;
const int k = (cu_trans_A == HIPBLAS_OP_N) ? M : N;
const int lda = (cu_trans_A == HIPBLAS_OP_N) ? m : k;
const int ldc = m;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemmEx(
context->cublas_handle(),
cu_trans_A,
HIPBLAS_OP_N,
m,
1,
k,
&alpha,
A,
HIP_R_16F,
lda,
x,
HIP_R_16F,
k,
&beta,
y,
HIP_R_16F,
ldc));
} else if (math_type == TensorProto_DataType_FLOAT16) {
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasHgemm(
context->cublas_handle(),
cu_trans_A,
HIPBLAS_OP_N,
m,
1,
k,
&alpha_fp16,
(const __half*)A,
lda,
(const __half*)x,
k,
&beta_fp16,
(__half*)y,
ldc));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
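// Note on the dimension shuffle above: the matrix-vector product is expressed
// as a GEMM with a single output column so that it can reuse the
// cublasSgemmEx / hipblasHgemm paths. For trans_A == CblasNoTrans the
// row-major M x N matrix A is an N x M matrix in the column-major view, hence
// the HIPBLAS_OP_T flag with m = M, k = N and lda = N; for CblasTrans no
// transpose is needed and the roles of M and N swap, which is what the
// m/k/lda/ldc selection encodes.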
namespace {
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = alpha;
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
CAFFE2_CUDA_API void Set<T, CUDAContext>( \
const size_t N, const T alpha, T* Y, CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (alpha == T(0)) { \
hipMemsetAsync(Y, 0, sizeof(T) * N, context->cuda_stream()); \
} else { \
hipLaunchKernelGGL(( SetKernel<T>) \
, dim3(CAFFE_GET_BLOCKS(N)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, alpha, Y); \
} \
}
CAFFE2_SPECIALIZED_CUDA_SET(float);
CAFFE2_SPECIALIZED_CUDA_SET(double);
CAFFE2_SPECIALIZED_CUDA_SET(bool);
CAFFE2_SPECIALIZED_CUDA_SET(int8_t);
CAFFE2_SPECIALIZED_CUDA_SET(int16_t);
CAFFE2_SPECIALIZED_CUDA_SET(int);
CAFFE2_SPECIALIZED_CUDA_SET(int64_t);
CAFFE2_SPECIALIZED_CUDA_SET(char);
CAFFE2_SPECIALIZED_CUDA_SET(uint8_t);
CAFFE2_SPECIALIZED_CUDA_SET(uint16_t);
#undef CAFFE2_SPECIALIZED_CUDA_SET
template <>
CAFFE2_CUDA_EXPORT void Set<at::Half, CUDAContext>(
const size_t N,
const at::Half alpha,
at::Half* Y,
CUDAContext* context) {
if (N > 0) {
hipLaunchKernelGGL(( SetKernel<at::Half>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, alpha, Y);
}
}
namespace {
template <typename T>
__global__ void
UniformShift(const size_t N, const float min, const float max, T* x) {
float scale = max - min;
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min);
}
}
__global__ void
UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) {
int* x_int = reinterpret_cast<int*>(x);
int range = (max - min + 1);
CUDA_1D_KERNEL_LOOP(i, N) {
x_int[i] = min + static_cast<int>(x[i] % range);
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void RandUniform<float, CUDAContext>(
const size_t n,
const float min,
const float max,
float* r,
CUDAContext* context) {
CURAND_ENFORCE(hiprandGenerateUniform(context->curand_generator(), r, n));
hipLaunchKernelGGL(( UniformShift<float>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, min, max, r);
}
template <>
CAFFE2_CUDA_EXPORT void RandUniform<double, CUDAContext>(
const size_t n,
const double min,
const double max,
double* r,
CUDAContext* context) {
CURAND_ENFORCE(
hiprandGenerateUniformDouble(context->curand_generator(), r, n));
hipLaunchKernelGGL(( UniformShift<double>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, min, max, r);
}
template <>
CAFFE2_CUDA_EXPORT void RandUniform<int, CUDAContext>(
const size_t n,
const int min,
const int max,
int* r,
CUDAContext* context) {
CURAND_ENFORCE(hiprandGenerate(
context->curand_generator(), reinterpret_cast<unsigned int*>(r), n));
hipLaunchKernelGGL(( UniformIntFit),
dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
n, min, max, reinterpret_cast<unsigned int*>(r));
}
template <typename T>
size_t HandleOddLengthRandGaussian(
const size_t n,
const T mean,
const T std,
T* r,
CUDAContext* context) {
if (n % 2 == 1) {
std::default_random_engine generator;
std::normal_distribution<T> distribution(mean, std);
const T random_value = distribution(generator);
Set<T, CUDAContext>(1, random_value, r + (n - 1), context);
return n - 1;
}
return n;
}
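// Note: the std::default_random_engine above is default-seeded on every
// call, so for a given (mean, std) the single padding value written for odd
// n is the same across calls; only the device-generated portion varies.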
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<float, CUDAContext>(
const size_t n,
const float mean,
const float std,
float* r,
CUDAContext* context) {
// If n is odd, we add a random Gaussian value at the end manually
// and generate n-1 random values using hiprandGenerateNormal.
// hiprandGenerateNormal requires n to be even.
const size_t even_n =
HandleOddLengthRandGaussian<float>(n, mean, std, r, context);
CURAND_ENFORCE(
hiprandGenerateNormal(context->curand_generator(), r, even_n, mean, std));
}
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<double, CUDAContext>(
const size_t n,
const double mean,
const double std,
double* r,
CUDAContext* context) {
const size_t even_n =
HandleOddLengthRandGaussian<double>(n, mean, std, r, context);
CURAND_ENFORCE(hiprandGenerateNormalDouble(
context->curand_generator(), r, even_n, mean, std));
}
template <>
CAFFE2_CUDA_EXPORT void Dot<float, CUDAContext>(
const int n,
const float* a,
const float* b,
float* y,
CUDAContext* context) {
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(hipblasSdot(context->cublas_handle(), n, a, 1, b, 1, y));
}
template <>
CAFFE2_CUDA_EXPORT void Dot<at::Half, CUDAContext>(
const int n,
const at::Half* a,
const at::Half* b,
at::Half* y,
CUDAContext* context) {
  // Execute with 32-bit math: fp16 inputs and output, fp32 compute type.
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(hipblasDotEx_v2(
context->cublas_handle(),
n,
a,
HIP_R_16F,
1,
b,
HIP_R_16F,
1,
y,
HIP_R_16F,
HIP_R_32F));
}
// A previous version of caffe2 used Thrust, but Thrust reductions perform an
// implicit scratch-space allocation and deallocation, which may interfere
// with NCCL and create a deadlock. Hence we use a custom reduction here.
#define SUM_KERNEL_NTHREADS 128
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y, bool square) {
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SUM_KERNEL_NTHREADS];
reduction_buffer[idx] = 0;
// A multilevel reduction.
// N -> 128
if (!square) {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
reduction_buffer[idx] += convert::To<T, float>(X[i]);
}
} else {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
float Xi = convert::To<T, float>(X[i]);
reduction_buffer[idx] += Xi * Xi;
}
}
__syncthreads();
// 128 -> 32
if (idx < 32) {
reduction_buffer[idx] += reduction_buffer[idx + 32] +
reduction_buffer[idx + 64] + reduction_buffer[idx + 96];
}
__syncthreads();
// 32 -> 1
if (idx == 0) {
float tmp = 0;
for (int i = 0; i < 32; ++i) {
tmp += reduction_buffer[i];
}
*Y = convert::To<float, T>(tmp);
}
}
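// SumKernel is designed for a single block of SUM_KERNEL_NTHREADS (128)
// threads (see the dim3(1) launches below) and always accumulates in float,
// even when T is at::Half.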
// According to the benchmarks script
// caffe2/caffe2/experiments/python/device_reduce_sum_bench.py,
// device reduce is slower for N <= 10000.
#define DEVICE_REDUCE_SIZE_THRESHOLD 10000
namespace {
template <typename T>
__global__ void SumConvertKernel(float* sum, T* dest) {
*dest = convert::To<float, T>(*sum);
}
template <typename T, typename IterT>
CAFFE2_CUDA_EXPORT void SumGenericIter(
const int N,
IterT it,
T*& dest,
CUDAContext* context,
Tensor* scratch_ptr) {
size_t memRequired = 0;
hipcub::DeviceReduce::Sum(
nullptr, memRequired, it, dest, N, context->cuda_stream());
auto buffer_size =
static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T));
if (!dest) {
// allocate one more T at the end of scratch for dest
scratch_ptr->Resize(std::vector<TIndex>{buffer_size + 1});
dest = scratch_ptr->template mutable_data<T>() + buffer_size;
} else {
scratch_ptr->Resize(std::vector<TIndex>{buffer_size});
}
hipcub::DeviceReduce::Sum(
static_cast<void*>(scratch_ptr->template mutable_data<T>()),
memRequired,
it,
dest,
N,
context->cuda_stream());
}
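// SumGenericIter calls hipcub::DeviceReduce::Sum twice: first with a null
// workspace pointer to query the required temporary storage, then for the
// actual reduction. The scratch tensor is sized in units of T (rounding the
// byte requirement up), with one extra element appended to hold the result
// when no output pointer was supplied.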
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Sum<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<float>(N, x, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, false);
}
}
template <>
CAFFE2_CUDA_EXPORT void Sum<int32_t, CUDAContext>(
const int N,
const int32_t* x,
int32_t* y,
CUDAContext* context,
Tensor* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<int32_t>(N, x, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, false);
}
}
namespace {
template <typename T>
struct FloatTransform {
inline __host__ __device__ float operator()(const T v) const {
return convert::To<T, float>(v);
}
};
} // namespace
#define CAFFE2_MATH_SUM_FUNC(T) \
template <> \
CAFFE2_CUDA_EXPORT void Sum<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> transform; \
hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \
x, transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \
} else { \
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \
N, x, y, false); \
} \
}
CAFFE2_MATH_SUM_FUNC(at::Half)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
template <typename T>
struct SqrTransform {
inline __host__ __device__ T operator()(const T v) const {
return v * v;
}
};
} // namespace
template <>
CAFFE2_CUDA_EXPORT void SumSqr<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SqrTransform<float> transform;
hipcub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
x, transform);
SumGenericIter<float>(N, it, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, true);
}
}
#define CAFFE2_MATH_SUMSQR_FUNC(T) \
template <> \
CAFFE2_CUDA_EXPORT void SumSqr<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> float_transform; \
hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> \
float_it(x, float_transform); \
SqrTransform<float> sqr_transform; \
hipcub::TransformInputIterator< \
float, \
SqrTransform<float>, \
decltype(float_it)> \
it(float_it, sqr_transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \
} else { \
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \
N, x, y, true); \
} \
}
CAFFE2_MATH_SUMSQR_FUNC(at::Half)
#undef CAFFE2_MATH_SUMSQR_FUNC
#undef DEVICE_REDUCE_SIZE_THRESHOLD
namespace {
template <typename T>
__global__ void
SelectKernel(const int N, const int D, const T* x, const int* idx, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i * D + idx[i]];
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Select<float, CUDAContext>(
const int N,
const int D,
const float* x,
const int* idx,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( SelectKernel<float>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, D, x, idx, y);
}
template <>
CAFFE2_CUDA_EXPORT void Select<at::Half, CUDAContext>(
const int N,
const int D,
const at::Half* x,
const int* idx,
at::Half* y,
CUDAContext* context) {
hipLaunchKernelGGL(( SelectKernel<at::Half>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, D, x, idx, y);
}
namespace {
template <typename TAlpha, typename TData>
__global__ void
ScaleCUDAKernel(const int n, const TAlpha alpha, const TData* x, TData* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
#if __CUDA_ARCH__ >= 350
y[i] = __ldg(x + i) * static_cast<TData>(alpha);
#else
y[i] = x[i] * static_cast<TData>(alpha);
#endif
}
}
template <typename TAlpha, typename TData>
__global__ void
ScaleCUDAKernel(const int n, const TAlpha* alpha, const TData* x, TData* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
#if __CUDA_ARCH__ >= 350
y[i] = __ldg(x + i) * static_cast<TData>(__ldg(alpha));
#else
y[i] = x[i] * static_cast<TData>(*alpha);
#endif
}
}
template <typename T>
__global__ void PowKernel(const int n, const T* x, const T exponent, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = powf(x[i], exponent);
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Powx<float, CUDAContext>(
const int N,
const float* a,
const float b,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( PowKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, a, b, y);
}
#define DELEGATE_CUBLAS_SCALE_FUNCTION(TAlpha, TData, CuBLASFunc) \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const int N, \
const TAlpha alpha, \
const TData* x, \
TData* y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (x != y) { \
hipMemcpyAsync( \
y, \
x, \
sizeof(TData) * N, \
hipMemcpyDeviceToDevice, \
context->cuda_stream()); \
} \
if (alpha != TAlpha(1)) { \
CUBLAS_ENFORCE(hipblasSetPointerMode( \
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); \
CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, &alpha, y, 1)); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const int N, \
const TAlpha* alpha, \
const TData* x, \
TData* y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (x != y) { \
hipMemcpyAsync( \
y, \
x, \
sizeof(TData) * N, \
hipMemcpyDeviceToDevice, \
context->cuda_stream()); \
} \
CUBLAS_ENFORCE(hipblasSetPointerMode( \
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE)); \
CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, alpha, y, 1)); \
}
DELEGATE_CUBLAS_SCALE_FUNCTION(float, float, hipblasSscal)
DELEGATE_CUBLAS_SCALE_FUNCTION(double, double, hipblasDscal)
#undef DELEGATE_CUBLAS_SCALE_FUNCTION
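// The two Scale overloads per type differ only in where alpha lives: the
// scalar version uses HIPBLAS_POINTER_MODE_HOST, while the pointer version
// expects a device-resident alpha and uses HIPBLAS_POINTER_MODE_DEVICE, so
// the scale factor can be produced on the GPU without a host round trip.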
#define CAFFE2_SPECIALIZED_CUDA_SCALE(TAlpha, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const int N, \
const TAlpha alpha, \
const TData* x, \
TData* y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (alpha == TAlpha(1)) { \
if (x != y) { \
hipMemcpyAsync( \
y, \
x, \
sizeof(TData) * N, \
hipMemcpyDeviceToDevice, \
context->cuda_stream()); \
} \
return; \
} \
hipLaunchKernelGGL(( ScaleCUDAKernel<TAlpha, TData>) \
, dim3(CAFFE_GET_BLOCKS(N)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, alpha, x, y); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const int N, \
const TAlpha* alpha, \
const TData* x, \
TData* y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
hipLaunchKernelGGL(( ScaleCUDAKernel<TAlpha, TData>) \
, dim3(CAFFE_GET_BLOCKS(N)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, alpha, x, y); \
}
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int32_t, std::int32_t)
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int64_t, std::int64_t)
#undef CAFFE2_SPECIALIZED_CUDA_SCALE
template <>
CAFFE2_CUDA_EXPORT void Scale<at::Half, at::Half, CUDAContext>(
const int N,
const at::Half alpha,
const at::Half* x,
at::Half* y,
CUDAContext* context) {
if (N == 0) {
return;
}
if (x != y) {
hipMemcpyAsync(
y,
x,
sizeof(at::Half) * N,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasScalEx_v2(
context->cublas_handle(),
N,
&alpha,
HIP_R_16F,
y,
HIP_R_16F,
1,
HIP_R_32F));
}
template <>
CAFFE2_CUDA_EXPORT void Scale<at::Half, at::Half, CUDAContext>(
const int N,
const at::Half* alpha,
const at::Half* x,
at::Half* y,
CUDAContext* context) {
if (N == 0) {
return;
}
if (x != y) {
hipMemcpyAsync(
y,
x,
sizeof(at::Half) * N,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(hipblasScalEx_v2(
context->cublas_handle(),
N,
alpha,
HIP_R_16F,
y,
HIP_R_16F,
1,
HIP_R_32F));
}
template <>
CAFFE2_CUDA_EXPORT void Scale<float, at::Half, CUDAContext>(
const int N,
const float alpha,
const at::Half* x,
at::Half* y,
CUDAContext* context) {
if (N == 0) {
return;
}
if (x != y) {
hipMemcpyAsync(
y,
x,
sizeof(at::Half) * N,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
if (alpha != 1.0f) {
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasScalEx_v2(
context->cublas_handle(),
N,
&alpha,
HIP_R_32F,
y,
HIP_R_16F,
1,
HIP_R_32F));
}
}
template <>
CAFFE2_CUDA_EXPORT void Scale<float, at::Half, CUDAContext>(
const int N,
const float* alpha,
const at::Half* x,
at::Half* y,
CUDAContext* context) {
if (N == 0) {
return;
}
if (x != y) {
hipMemcpyAsync(
y,
x,
sizeof(at::Half) * N,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(hipblasScalEx_v2(
context->cublas_handle(),
N,
alpha,
HIP_R_32F,
y,
HIP_R_16F,
1,
HIP_R_32F));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>(
const int N,
const float alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<double, CUDAContext>(
const int N,
const float alpha,
const double* X,
double* Y,
CUDAContext* context) {
double alpha_d{alpha};
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(
hipblasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<at::Half, CUDAContext>(
const int N,
const float alpha,
const at::Half* X,
at::Half* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasAxpyEx_v2(
context->cublas_handle(),
N,
&alpha,
HIP_R_32F,
X,
HIP_R_16F,
1,
Y,
HIP_R_16F,
1,
HIP_R_32F));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>(
const int N,
const float* alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(hipblasSaxpy(context->cublas_handle(), N, alpha, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<at::Half, CUDAContext>(
const int N,
const float* alpha,
const at::Half* X,
at::Half* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(hipblasAxpyEx_v2(
context->cublas_handle(),
N,
alpha,
HIP_R_32F,
X,
HIP_R_16F,
1,
Y,
HIP_R_16F,
1,
HIP_R_32F));
}
namespace {
template <typename TCoeff, typename TData>
__global__ void AxpbyCUDAKernel(
const int N,
const TCoeff a,
const TData* x,
const TCoeff b,
TData* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
y[i] = __ldg(x + i) * a + y[i] * b;
#else
y[i] = x[i] * a + y[i] * b;
#endif
}
}
template <>
__global__ void AxpbyCUDAKernel<float, at::Half>(
const int N,
const float a,
const at::Half* x,
const float b,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * a +
convert::To<at::Half, float>(y[i]) * b);
}
}
template <typename TCoeff, typename TData>
__global__ void AxpbyCUDAKernel(
const int N,
const TCoeff* a,
const TData* x,
const TCoeff* b,
TData* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
y[i] = __ldg(x + i) * __ldg(a) + y[i] * __ldg(b);
#else
y[i] = x[i] * *a + y[i] * *b;
#endif
}
}
template <>
__global__ void AxpbyCUDAKernel<float, at::Half>(
const int N,
const float* a,
const at::Half* x,
const float* b,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * __ldg(a) +
convert::To<at::Half, float>(y[i]) * __ldg(b));
#else
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * *a +
convert::To<at::Half, float>(y[i]) * *b);
#endif
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_AXPBY(TCoeff, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>( \
const int n, \
const TCoeff a, \
const TData* x, \
const TCoeff b, \
TData* y, \
CUDAContext* context) { \
hipLaunchKernelGGL(( AxpbyCUDAKernel<TCoeff, TData>) \
, dim3(CAFFE_GET_BLOCKS(n)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), n, a, x, b, y); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>( \
const int n, \
const TCoeff* a, \
const TData* x, \
const TCoeff* b, \
TData* y, \
CUDAContext* context) { \
hipLaunchKernelGGL(( AxpbyCUDAKernel<TCoeff, TData>) \
, dim3(CAFFE_GET_BLOCKS(n)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), n, a, x, b, y); \
}
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, float)
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, at::Half)
#undef CAFFE2_SPECIALIZED_CUDA_AXPBY
namespace {
template <typename T>
__global__ void Im2ColNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int w_out = index % output_w;
const int h_index = index / output_w;
const int h_out = h_index % output_h;
const int channel_in = h_index / output_h;
const int channel_out = channel_in * kernel_h * kernel_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
const int output_size = output_h * output_w;
T* col_data_ptr =
col_data + (channel_out * output_h + h_out) * output_w + w_out;
const T* img_data_ptr =
img_data + (channel_in * input_h + h_in) * input_w + w_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? __ldg(img_data_ptr + dh * input_w + dw)
: 0;
#else
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? img_data_ptr[dh * input_w + dw]
: 0;
#endif
col_data_ptr += output_size;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
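// Layout produced above (NCHW): the column buffer has one row per
// (channel, kernel_y, kernel_x) triple and one column per output pixel,
// i.e. shape (C * kernel_h * kernel_w) x (output_h * output_w). Each thread
// fills the kernel_h * kernel_w entries belonging to one output pixel of one
// input channel, writing zeros for padded locations.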
template <typename T>
__global__ void Im2ColNHWCCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_w,
const int channels,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int channel_in = index % channels;
const int w_out = index / channels % output_w;
const int h_out = index / channels / output_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
T* col_data_ptr = col_data +
(h_out * output_w + w_out) * channels * kernel_h * kernel_w +
channel_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? __ldg(img_data + (h * input_w + w) * channels + channel_in)
: 0;
#else
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? img_data[(h * input_w + w) * channels + channel_in]
: 0;
#endif
col_data_ptr += channels;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
template <typename T>
__global__ void Col2ImNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int w = index % input_w + pad_l;
const int h = index / input_w % input_h + pad_t;
const int c = index / (input_h * input_w);
    // Compute the range of output rows/columns whose dilated patch covers
    // this (pad-adjusted) input pixel.
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = (h - h_col * stride_h);
int w_k = (w - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int col_data_index =
(((c * patch_h + h_k) * patch_w + w_k) * output_h + h_col) *
output_w +
w_col;
#if __CUDA_ARCH__ >= 350
val += __ldg(col_data + col_data_index);
#else
val += col_data[col_data_index];
#endif
}
}
}
img_data[index] = val;
}
}
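// Example of the bounds above: with stride_w = 2 and dpatch_w = 5
// (patch_w = 3, dilation_w = 2), a pad-adjusted input column w = 6 gives
// w_col_start = (6 - 5) / 2 + 1 = 1 and w_col_end = min(6 / 2 + 1, output_w),
// so (assuming output_w >= 4) output columns 1..3 are exactly the ones whose
// patch [2 * w_col, 2 * w_col + 4] covers column 6.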
template <typename T>
__global__ void Col2ImNHWCCUDAKernel(
const int n,
const int input_w,
const int channels,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int c = index % channels;
const int w = index / channels % input_w + pad_l;
const int h = index / channels / input_w + pad_t;
    // Compute the range of output rows/columns whose dilated patch covers
    // this (pad-adjusted) input pixel.
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
const int channels_col = patch_h * patch_w * channels;
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = h - h_col * stride_h;
int w_k = w - w_col * stride_w;
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int c_col = (h_k * patch_w + w_k) * channels + c;
#if __CUDA_ARCH__ >= 350
val += __ldg(
col_data + (h_col * output_w + w_col) * channels_col + c_col);
#else
val += col_data[(h_col * output_w + w_col) * channels_col + c_col];
#endif
}
}
}
img_data[index] = val;
}
}
template <typename T, int N, bool kCol2Im>
__global__ void Im2ColNdNCHWCUDAKernel(
const int outer_size,
const int inner_size,
const int kernel_size,
SimpleArray<int, N + 1> img_shape,
SimpleArray<int, N + 1> col_shape,
SimpleArray<int, N> kernel_shape,
SimpleArray<int, N> stride,
SimpleArray<int, N> dilation,
SimpleArray<int, N> pad,
const T* X_data,
T* Y_data) {
int d_offset[N];
int d_iter[N];
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
int offset_i = i;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_offset[d_i] = offset_i % kernel_shape.data[d_i];
offset_i /= kernel_shape.data[d_i];
}
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int offset_j = j;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_iter[d_i] = offset_j % col_shape.data[d_i + 1];
offset_j /= col_shape.data[d_i + 1];
}
const int col_index = i * inner_size + j;
int img_index = i / kernel_size;
bool is_padding = false;
#pragma unroll
for (int d_i = 0; d_i < N; ++d_i) {
const int d_img = d_iter[d_i] * stride.data[d_i] - pad.data[d_i] +
d_offset[d_i] * dilation.data[d_i];
is_padding |= !utils::IsAGeZeroAndALtB(d_img, img_shape.data[d_i + 1]);
img_index = img_index * img_shape.data[d_i + 1] + d_img;
}
#if __CUDA_ARCH__ >= 350
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : __ldg(X_data + img_index);
} else if (!is_padding) {
atomicAdd(Y_data + img_index, __ldg(X_data + col_index));
}
#else
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : X_data[img_index];
} else if (!is_padding) {
atomicAdd(Y_data + img_index, X_data[col_index]);
}
#endif
}
}
}
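// Im2ColNdNCHWCUDAKernel serves both directions: with kCol2Im == false it
// gathers image values into the column buffer, and with kCol2Im == true it
// scatters column values back with atomicAdd, which is why the Col2ImNd path
// below zeroes the image tensor before launching.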
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Im2ColNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
hipLaunchKernelGGL(( Im2ColNdNCHWCUDAKernel<T, N, false>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
img_data,
col_data);
}
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Col2ImNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
Set<T, CUDAContext>(img_size, 0, img_data, context);
hipLaunchKernelGGL(( Im2ColNdNCHWCUDAKernel<T, N, true>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
col_data,
img_data);
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context,
const int /* groups */) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * output_h * output_w;
hipLaunchKernelGGL(( Im2ColNCHWCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
img_data,
col_data);
}
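// Output size follows the usual convolution formula,
//   output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1,
// e.g. height = 224, pad_t = pad_b = 1, kernel_h = 3, dilation_h = 1 and
// stride_h = 1 give dkernel_h = 3 and (224 + 2 - 3) / 1 + 1 = 224.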
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context,
const int groups) {
CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Im2Col");
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = output_h * output_w * channels;
hipLaunchKernelGGL(( Im2ColNHWCCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_w,
channels,
img_data,
col_data);
}
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context,
const int /* groups */) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * height * width;
hipLaunchKernelGGL(( Col2ImNCHWCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context,
const int groups) {
CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Col2Im");
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = height * width * channels;
hipLaunchKernelGGL(( Col2ImNHWCCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
width,
channels,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context) {
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Im2ColNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
img_data,
col_data,
context);
}
template <>
CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context) {
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Col2ImNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
col_data,
img_data,
context);
}
template <>
CAFFE2_CUDA_EXPORT void CopyMatrix<CUDAContext>(
const size_t itemsize,
const int M,
const int N,
const void* A,
const int lda,
void* B,
const int ldb,
CUDAContext* context,
TypeMeta::TypedCopy copy) {
CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context");
hipMemcpy2DAsync(
B,
ldb * itemsize,
A,
lda * itemsize,
N * itemsize,
M,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
#define CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(T) \
template <> \
void CopyMatrix<T, CUDAContext>( \
const int M, \
const int N, \
const T* A, \
const int lda, \
T* B, \
const int ldb, \
CUDAContext* context) { \
if (M == 0 || N == 0) { \
return; \
} \
hipMemcpy2DAsync( \
B, \
sizeof(T) * ldb, \
A, \
sizeof(T) * lda, \
sizeof(T) * N, \
M, \
hipMemcpyDeviceToDevice, \
context->cuda_stream()); \
}
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(float)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(double)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(TIndex)
#undef CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX
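// hipMemcpy2DAsync takes pitches and widths in bytes, hence the sizeof(T)
// (or itemsize) factors above; lda and ldb themselves are element strides
// between consecutive rows.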
template <>
CAFFE2_CUDA_EXPORT void CopyVector<float, CUDAContext>(
const int N,
const float* src,
float* dst,
CUDAContext* context) {
if (src != dst && N > 0) {
hipMemcpyAsync(
dst,
src,
sizeof(float) * N,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
}
namespace {
template <typename T>
using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
template <typename T, class Reducer>
__global__ void RowwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
val = reducer(X[i * cols + j], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val * alpha;
}
__syncthreads();
}
}
template <typename T, class Reducer>
__global__ void ColwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < cols; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < rows; j += blockDim.x) {
val = reducer(X[j * cols + i], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val * alpha;
}
__syncthreads();
}
}
} // namespace
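// Both reduction kernels above assign thread blocks to rows (respectively
// columns), grid-striding when there are more rows than blocks, and reduce
// within each block via hipcub::BlockReduce. The trailing __syncthreads()
// is needed because the shared TempStorage is reused on the next iteration.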
#define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T) \
template <> \
CAFFE2_CUDA_EXPORT void RowwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( RowwiseReduceKernel), \
::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), \
N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX
#define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T) \
template <> \
CAFFE2_CUDA_EXPORT void ColwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( ColwiseReduceKernel), \
::min(D, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), \
N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX
namespace {
__global__ void
maximum_kernel(const int N, const float alpha, const float* x, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = fmaxf(x[i], alpha);
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Maximum(
const int N,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( maximum_kernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, alpha, x, y);
}
namespace {
template <typename T, class Reducer, int D>
__global__ void ReduceTensorCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<FixedDivisor<int>, D> Y_dims,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
Y_dims.data[d].DivMod(Y_index, &Y_index, &r);
X_index += r * X_strides.data[d];
}
#if __CUDA_ARCH__ >= 350
val = reducer(val, __ldg(X + X_index));
#else
val = reducer(val, X[X_index]);
#endif
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val * alpha;
}
__syncthreads();
}
}
template <typename T, class Reducer, int D>
CAFFE2_CUDA_EXPORT void ReduceTensorCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const Reducer& reducer,
const T init,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<FixedDivisor<int>, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
}
hipLaunchKernelGGL(( ReduceTensorCUDAKernel<T, Reducer, D>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size,
inner_size,
X_strides,
Y_dims,
reducer,
init,
alpha,
X,
Y);
}
template <typename T, class Reducer>
CAFFE2_CUDA_EXPORT void ReduceTensorCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const Reducer& reducer,
const T init,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
std::vector<int> Y_dims_vector(dims, dims + num_dims);
for (int i = 0; i < num_axes; ++i) {
Y_dims_vector[axes[i]] = 1;
}
const int* X_dims = dims;
const int* Y_dims = Y_dims_vector.data();
const int X_size =
std::accumulate(X_dims, X_dims + num_dims, 1, std::multiplies<int>());
const int Y_size =
std::accumulate(Y_dims, Y_dims + num_dims, 1, std::multiplies<int>());
if (X_size == 0) {
Set<T, CUDAContext>(Y_size, alpha * init, Y, context);
return;
}
if (alpha == T(0)) {
Set<T, CUDAContext>(Y_size, T(0), Y, context);
return;
}
if (std::equal(X_dims, X_dims + num_dims, Y_dims)) {
Scale<T, T, CUDAContext>(X_size, alpha, X, Y, context);
return;
}
int rows;
int cols;
if (utils::IsRowwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
hipLaunchKernelGGL(( RowwiseReduceKernel<T>)
, dim3(::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), rows, cols, reducer, init, alpha, X, Y);
return;
}
if (utils::IsColwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
hipLaunchKernelGGL(( ColwiseReduceKernel<T>)
, dim3(::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), rows, cols, reducer, init, alpha, X, Y);
return;
}
std::vector<int> transpose_axes(num_dims);
utils::ComputeTransposeAxesForReduceOp(
num_dims, num_axes, axes, transpose_axes.data());
const int outer_size = Y_size;
const int inner_size = X_size / Y_size;
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(
num_dims,
ReduceTensorCUDAImpl,
T,
Reducer,
outer_size,
inner_size,
dims,
transpose_axes.data(),
reducer,
init,
alpha,
X,
Y,
context);
}
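// Dispatch order used above: an empty input fills Y with alpha * init,
// alpha == 0 writes zeros, an identity reduction degenerates to Scale, the
// contiguous rowwise/colwise cases get dedicated kernels, and everything
// else goes through the transposed-stride ReduceTensorCUDAImpl.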
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(T) \
template <> \
CAFFE2_CUDA_EXPORT void ReduceMin<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
hipcub::Min(), \
std::numeric_limits<T>::max(), \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(T) \
template <> \
CAFFE2_CUDA_EXPORT void ReduceMax<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
hipcub::Max(), \
std::numeric_limits<T>::lowest(), \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(T) \
template <> \
CAFFE2_CUDA_EXPORT void ReduceSum<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
hipcub::Sum(), \
T(0), \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(T) \
template <> \
CAFFE2_CUDA_EXPORT void ReduceMean<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
int scale = 1; \
for (int i = 0; i < num_axes; ++i) { \
scale *= dims[axes[i]]; \
} \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
hipcub::Sum(), \
T(0), \
alpha / static_cast<T>(scale), \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(float)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN
namespace {
template <typename T, int D>
__global__ void BroadcastCUDAKernel(
const int Y_size,
const SimpleArray<int, D> X_strides,
const SimpleArray<FixedDivisor<int>, D> Y_dims,
const T alpha,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, Y_size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
Y_dims.data[i].DivMod(Y_index_val, &Y_index_val, &d);
X_index += d * X_strides.data[i];
}
#if __CUDA_ARCH__ >= 350
Y[Y_index] = __ldg(X + X_index) * alpha;
#else
Y[Y_index] = X[X_index] * alpha;
#endif
}
}
template <typename T, int D>
CAFFE2_CUDA_EXPORT void BroadcastCUDAImpl(
const int X_ndim,
const int* X_dims,
const int* Y_dims,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides_array;
SimpleArray<FixedDivisor<int>, D> Y_dims_array;
const int d = D - X_ndim;
std::fill(X_strides_array.data, X_strides_array.data + d, 0);
int cur_stride = 1;
for (int i = D - 1; i >= d; --i) {
CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]);
X_strides_array.data[i] = X_dims[i - d] == 1 ? 0 : cur_stride;
cur_stride *= X_dims[i - d];
}
for (int i = 0; i < D; ++i) {
if (Y_dims[i] == 0) {
return;
}
Y_dims_array.data[i] = FixedDivisor<int>(Y_dims[i]);
}
const int Y_size =
std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>());
hipLaunchKernelGGL(( BroadcastCUDAKernel<T, D>)
, dim3(CAFFE_GET_BLOCKS(Y_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
Y_size, X_strides_array, Y_dims_array, alpha, X, Y);
}
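// Broadcasting is implemented with strides: dimensions of X that are 1 (or
// missing on the left) get stride 0, so every Y index maps onto the same X
// element along that axis, while FixedDivisor splits the flat Y index into
// per-dimension coordinates.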
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T) \
template <> \
CAFFE2_CUDA_EXPORT void Broadcast<T, CUDAContext>( \
const int X_ndim, \
const int* X_dims, \
const int Y_ndim, \
const int* Y_dims, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
CAFFE_ENFORCE_LE(X_ndim, Y_ndim); \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
Y_ndim, \
BroadcastCUDAImpl, \
T, \
X_ndim, \
X_dims, \
Y_dims, \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(float)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(double)
#undef CAFFE2_SPECIALIZED_CUDA_BROADCAST
namespace {
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
const int rows,
const int cols,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(cols);
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
const int X_index = i * cols + j;
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[i] = mu;
variance[i] = v_val * scale - mu * mu;
}
__syncthreads();
}
}
template <typename T>
__global__ void ColwiseMomentsCUDAKernel(
const int rows,
const int cols,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(rows);
for (int i = blockIdx.x; i < cols; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < rows; j += blockDim.x) {
const int X_index = j * cols + i;
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[i] = mu;
variance[i] = v_val * scale - mu * mu;
}
__syncthreads();
}
}
template <typename T, int D>
__global__ void MomentsCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<FixedDivisor<int>, D> Y_dims,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(inner_size);
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
Y_dims.data[d].DivMod(Y_index, &Y_index, &r);
X_index += r * X_strides.data[d];
}
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[i] = mu;
variance[i] = v_val * scale - mu * mu;
}
__syncthreads();
}
}
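// All three moments kernels use the one-pass identity
//   variance = E[X^2] - (E[X])^2,
// accumulating both sums in a single sweep. This is cheap but can lose
// precision when the mean is large relative to the spread, compared with a
// two-pass or Welford-style computation.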
template <typename T, int D>
CAFFE2_CUDA_EXPORT void MomentsCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<FixedDivisor<int>, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
}
hipLaunchKernelGGL(( MomentsCUDAKernel<T, D>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size, inner_size, X_strides, Y_dims, X, mean, variance);
}
template <typename T>
CAFFE2_CUDA_EXPORT void MomentsCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
std::vector<int> Y_dims_vector(dims, dims + num_dims);
for (int i = 0; i < num_axes; ++i) {
Y_dims_vector[axes[i]] = 1;
}
const int* X_dims = dims;
const int* Y_dims = Y_dims_vector.data();
const int X_size =
std::accumulate(X_dims, X_dims + num_dims, 1, std::multiplies<int>());
const int Y_size =
std::accumulate(Y_dims, Y_dims + num_dims, 1, std::multiplies<int>());
if (X_size == 0) {
Set<T, CUDAContext>(Y_size, T(0), mean, context);
Set<T, CUDAContext>(Y_size, T(0), variance, context);
return;
}
if (std::equal(X_dims, X_dims + num_dims, Y_dims)) {
hipMemcpyAsync(
mean,
X,
sizeof(T) * X_size,
hipMemcpyDeviceToDevice,
context->cuda_stream());
Set<T, CUDAContext>(Y_size, T(0), variance, context);
return;
}
int rows;
int cols;
if (utils::IsRowwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
hipLaunchKernelGGL(( RowwiseMomentsCUDAKernel<T>)
, dim3(::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), rows, cols, X, mean, variance);
return;
}
if (utils::IsColwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
hipLaunchKernelGGL(( ColwiseMomentsCUDAKernel<T>)
, dim3(::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), rows, cols, X, mean, variance);
return;
}
std::vector<int> transpose_axes(num_dims);
utils::ComputeTransposeAxesForReduceOp(
num_dims, num_axes, axes, transpose_axes.data());
const int pivot = num_dims - num_axes;
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= dims[transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < num_dims; ++i) {
inner_size *= dims[transpose_axes[i]];
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
num_dims,
MomentsCUDAImpl,
T,
outer_size,
inner_size,
dims,
transpose_axes.data(),
X,
mean,
variance,
context);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_MOMENTS(T) \
template <> \
CAFFE2_CUDA_EXPORT void Moments<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* mean, \
T* variance, \
CUDAContext* context) { \
MomentsCUDA<T>( \
num_dims, dims, num_axes, axes, X, mean, variance, context); \
}
CAFFE2_SPECIALIZED_CUDA_MOMENTS(float)
#undef CAFFE2_SPECIALIZED_CUDA_MOMENTS
namespace {
template <typename T>
__global__ void
InvStdCUDAKernel(const int N, const T epsilon, const T* var, T* inv_std);
#define DELEGATE_INV_STD_KERNEL_FUNCTION(T, Func) \
template <> \
__global__ void InvStdCUDAKernel<T>( \
const int N, const T epsilon, const T* var, T* inv_std) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
inv_std[i] = Func(var[i] + epsilon); \
} \
}
DELEGATE_INV_STD_KERNEL_FUNCTION(float, rsqrtf)
#undef DELEGATE_INV_STD_KERNEL_FUNCTION
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_INV_STD(T) \
template <> \
CAFFE2_CUDA_EXPORT void InvStd<T, CUDAContext>( \
const int N, \
const T epsilon, \
const T* var, \
T* inv_std, \
CUDAContext* context) { \
hipLaunchKernelGGL(( InvStdCUDAKernel<T>) \
, dim3(CAFFE_GET_BLOCKS(N)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, epsilon, var, inv_std); \
}
CAFFE2_SPECIALIZED_CUDA_INV_STD(float)
#undef CAFFE2_SPECIALIZED_CUDA_INV_STD
namespace {
template <typename T, int D>
__global__ void TransposeCUDAKernel(
const int size,
const SimpleArray<int, D> X_strides,
const SimpleArray<FixedDivisor<int>, D> Y_dims,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
Y_dims.data[i].DivMod(Y_index_val, &Y_index_val, &d);
X_index += d * X_strides.data[i];
}
#if __CUDA_ARCH__ >= 350
Y[Y_index] = __ldg(X + X_index);
#else
Y[Y_index] = X[X_index];
#endif
}
}
template <typename T, int D>
CAFFE2_CUDA_EXPORT void TransposeCUDAImpl(
const int* dims,
const int* axes,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<FixedDivisor<int>, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
int size = 1;
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
size *= dims[i];
}
hipLaunchKernelGGL(( TransposeCUDAKernel<T, D>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, X_strides, Y_dims, X, Y);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(T) \
template <> \
CAFFE2_CUDA_EXPORT void Transpose<T, CUDAContext>( \
const int ndim, \
const int* dims, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
if (utils::IsIdentityPermutation(ndim, axes)) { \
const int size = \
std::accumulate(dims, dims + ndim, 1, std::multiplies<int>()); \
context->template Copy<T, CUDAContext, CUDAContext>(size, X, Y); \
return; \
} \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
ndim, TransposeCUDAImpl, T, dims, axes, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(float)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(double)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(TIndex)
#undef CAFFE2_SPECIALIZED_CUDA_TRANSPOSE
namespace {
template <typename T, StorageOrder kOrder>
__global__ void AffineChannelCUDAKernel(
const int size,
const int C,
const int HxW,
const T* X,
const T* scale,
const T* bias,
T* Y) {
CUDA_1D_KERNEL_LOOP(i, size) {
const int c = kOrder == StorageOrder::NCHW ? i / HxW % C : i % C;
#if __CUDA_ARCH__ >= 350
Y[i] = __ldg(scale + c) * __ldg(X + i) + __ldg(bias + c);
#else
Y[i] = scale[c] * X[i] + bias[c];
#endif
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(T, kOrder) \
template <> \
CAFFE2_CUDA_EXPORT void AffineChannel<T, CUDAContext, kOrder>( \
const int N, \
const int C, \
const int HxW, \
const T* X, \
const T* scale, \
const T* bias, \
T* Y, \
CUDAContext* context) { \
const int size = N * C * HxW; \
hipLaunchKernelGGL(( AffineChannelCUDAKernel<T, kOrder>) \
, dim3(CAFFE_GET_BLOCKS(size)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), size, C, HxW, X, scale, bias, Y); \
}
CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(float, StorageOrder::NCHW)
CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(float, StorageOrder::NHWC)
#undef CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL
} // namespace math
} // namespace caffe2
| e8f63b83f5ea71140c87f57b560ab288fa1669f5.cu | // Implements the math functions for GPU.
#include "caffe2/utils/math.h"
#include <cstring>
#include <limits>
#include <numeric>
#include <vector>
#include <cub/block/block_reduce.cuh>
#include <cub/cub.cuh>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/fixed_divisor.h"
#include "caffe2/utils/math_utils.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
namespace {
#define DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Func, expr) \
template <typename T> \
struct Func##Functor { \
inline __host__ __device__ T \
operator()(const T& lhs, const T& rhs) const { \
return lhs expr rhs; \
} \
}; \
template <> \
struct Func##Functor<at::Half> { \
inline __host__ __device__ at::Half \
operator()(const at::Half& lhs, const at::Half& rhs) const { \
return convert::To<float, at::Half>(convert::To<at::Half, float>( \
lhs) expr convert::To<at::Half, float>(rhs)); \
} \
};
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Add, +)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Sub, -)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Mul, *)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Div, /)
#undef DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR
template <typename T>
__global__ void SinCosCUDAKernel(const int N, const T* X, T* S, T* C) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
sincos(__ldg(X + i), S + i, C + i);
#else
sincos(X[i], S + i, C + i);
#endif
}
}
template <typename TIn, typename TOut, class BinaryOperator>
__global__ void SimpleBinaryOpCUDAKernel(
const int N,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(i, N) {
C[i] = op(A[i], B[i]);
}
}
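// Rowwise/Colwise broadcast kernels: one operand covers the full rows * cols
// output while the other is a vector that is replicated across rows (length
// cols, indexed by C_index % cols) or across columns (length rows, indexed by
// C_index / cols). The broadcast_1st flag selects whether A or B is the
// broadcast (vector) operand.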
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void RowwiseBinaryOpCUDAKenel(
const int size,
const FixedDivisor<int> cols,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
const int j = cols.Mod(C_index);
const int A_index = broadcast_1st ? j : C_index;
const int B_index = broadcast_1st ? C_index : j;
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void ColwiseBinaryOpCUDAKenel(
const int size,
const FixedDivisor<int> cols,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
const int i = cols.Div(C_index);
const int A_index = broadcast_1st ? i : C_index;
const int B_index = broadcast_1st ? C_index : i;
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, int D>
__global__ void BroadcastBinaryOpCUDAKernel(
const int size,
const SimpleArray<int, D> A_strides,
const SimpleArray<int, D> B_strides,
const SimpleArray<FixedDivisor<int>, D> C_dims,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
int A_index = 0;
int B_index = 0;
int C_index_val = C_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
C_dims.data[i].DivMod(C_index_val, &C_index_val, &d);
A_index += d * A_strides.data[i];
B_index += d * B_strides.data[i];
}
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BinaryOpWith2DBroadcasting(
const int rows,
const int cols,
const bool rowwise_broadcast,
const bool broadcast_1st,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
if (rows == 0 || cols == 0) {
return;
}
const int size = rows * cols;
const FixedDivisor<int> cols_div(cols);
if (rowwise_broadcast) {
if (broadcast_1st) {
RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, cols_div, op, A, B, C);
} else {
RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, cols_div, op, A, B, C);
}
} else {
if (broadcast_1st) {
ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, cols_div, op, A, B, C);
} else {
ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, cols_div, op, A, B, C);
}
}
}
template <typename TIn, typename TOut, class BinaryOperator, int D>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOpImpl(
const int* A_dims,
const int* B_dims,
const int* C_dims,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
SimpleArray<int, D> A_strides_array;
SimpleArray<int, D> B_strides_array;
SimpleArray<FixedDivisor<int>, D> C_dims_array;
int A_stride = 1;
int B_stride = 1;
for (int i = D - 1; i >= 0; --i) {
if (C_dims[i] == 0) {
return;
}
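    // Broadcasting is implemented by giving size-1 dimensions a stride of 0,
    // so every output index along such a dimension reads the same input
    // element.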
A_strides_array.data[i] = A_dims[i] == 1 ? 0 : A_stride;
B_strides_array.data[i] = B_dims[i] == 1 ? 0 : B_stride;
A_stride *= A_dims[i];
B_stride *= B_dims[i];
C_dims_array.data[i] = FixedDivisor<int>(C_dims[i]);
}
const int size =
std::accumulate(C_dims, C_dims + D, 1, std::multiplies<int>());
BroadcastBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, D>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
size, A_strides_array, B_strides_array, C_dims_array, op, A, B, C);
}
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOp(
const int A_ndim,
const int* A_dims,
const int B_ndim,
const int* B_dims,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
const int ndim = std::max(A_ndim, B_ndim);
std::vector<int> A_dims_array(ndim);
std::vector<int> B_dims_array(ndim);
std::vector<int> C_dims_array(ndim);
utils::ComputeBroadcastBinaryOpDims(
A_ndim,
A_dims,
B_ndim,
B_dims,
A_dims_array.data(),
B_dims_array.data(),
C_dims_array.data());
if (A_dims_array == B_dims_array) {
const int size = std::accumulate(
C_dims_array.cbegin(), C_dims_array.cend(), 1, std::multiplies<int>());
SimpleBinaryOpCUDAKernel<TIn, TOut, BinaryOperator>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, op, A, B, C);
return;
}
int rows;
int cols;
bool broadcast_1st;
if (utils::IsRowwiseBroadcastBinaryOp(
ndim,
A_dims_array.data(),
B_dims_array.data(),
&rows,
&cols,
&broadcast_1st)) {
BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
rows, cols, true, broadcast_1st, op, A, B, C, context);
return;
}
if (utils::IsColwiseBroadcastBinaryOp(
ndim,
A_dims_array.data(),
B_dims_array.data(),
&rows,
&cols,
&broadcast_1st)) {
BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
rows, cols, false, broadcast_1st, op, A, B, C, context);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3(
ndim,
BroadcastBinaryOpImpl,
TIn,
TOut,
BinaryOperator,
A_dims_array.data(),
B_dims_array.data(),
C_dims_array.data(),
op,
A,
B,
C,
context);
}
} // namespace
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Func, op) \
__global__ void Func##CUDAKernel(const int N, const T* X, T* Y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
Y[i] = op(X[i]); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>( \
const int N, const T* x, T* y, CUDAContext* context) { \
Func##CUDAKernel<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, x, y); \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Acos, acosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Asin, asinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tan, tanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Atan, atanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sinh, sinhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cosh, coshf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tanh, tanhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, utils::Square<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Rsqrt, rsqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cbrt, cbrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cube, utils::Cube<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Cube, utils::Cube<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Cube,
utils::Cube<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Cube,
utils::Cube<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(bool, Not, utils::Not)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Neg, utils::Negate<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Neg, utils::Negate<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Neg,
utils::Negate<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Neg,
utils::Negate<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sign, utils::Sign<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sign, utils::Sign<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Sign,
utils::Sign<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Sign,
utils::Sign<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Inv, utils::Inv<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Inv, utils::Inv<double>)
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
#define CAFFE2_SPECIALIZED_CUDA_SINCOS(T) \
template <> \
CAFFE2_CUDA_EXPORT void SinCos<T, CUDAContext>( \
const int N, const T* x, T* ys, T* yc, CUDAContext* context) { \
SinCosCUDAKernel<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, x, ys, yc); \
}
CAFFE2_SPECIALIZED_CUDA_SINCOS(float)
CAFFE2_SPECIALIZED_CUDA_SINCOS(double)
#undef CAFFE2_SPECIALIZED_CUDA_SINCOS
#define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>( \
const int N, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
SimpleBinaryOpCUDAKernel<TIn, TOut, Op<TIn>> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, Op<TIn>(), A, B, C); \
}
#define DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION
#define DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_SIMPLE_CUDA_BINARY_FUNCTION
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
float,
float,
ElemwiseMax,
thrust::maximum);
#undef DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION
#define DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, true>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FixedDivisor<int> cols_div(cols); \
RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true> \
<<<CAFFE_GET_BLOCKS(size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, false>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FixedDivisor<int> cols_div(cols); \
RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false> \
<<<CAFFE_GET_BLOCKS(size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, true>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FixedDivisor<int> cols_div(cols); \
ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true> \
<<<CAFFE_GET_BLOCKS(size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, false>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FixedDivisor<int> cols_div(cols); \
ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false> \
<<<CAFFE_GET_BLOCKS(size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \
}
#define DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION
#define DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION
#define DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>( \
const int A_ndim, \
const int* A_dims, \
const int B_ndim, \
const int* B_dims, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
BroadcastBinaryOp<TIn, TOut, Op<TIn>>( \
A_ndim, A_dims, B_ndim, B_dims, Op<TIn>(), A, B, C, context); \
}
#define DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION
#define DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION
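// The reductions below follow cub::DeviceReduce's two-pass protocol: a first
// call with a null workspace pointer only reports the temporary-storage
// requirement, the scratch tensor is resized to hold it (rounding the byte
// count up to whole elements of T), and a second call performs the actual
// reduction on the context's stream.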
#define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \
template <> \
CAFFE2_CUDA_EXPORT void Funcname<T, CUDAContext>( \
const int N, \
const T* src, \
T* dst, \
Tensor* scratch_ptr, \
CUDAContext* context) { \
size_t memRequired = 0; \
cub::DeviceReduce::func( \
nullptr, memRequired, src, dst, N, context->cuda_stream()); \
auto buffer_size = \
static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T)); \
scratch_ptr->Resize(std::vector<TIndex>{buffer_size}); \
cub::DeviceReduce::func( \
static_cast<void*>(scratch_ptr->mutable_data<T>()), \
memRequired, \
src, \
dst, \
N, \
context->cuda_stream()); \
}
DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min)
DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max)
#undef DELEGATE_REDUCTION_FUNCTION
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
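  // Since A, B and C are row-major while cuBLAS expects column-major data, we
  // compute C^T = B^T * A^T: a row-major matrix reinterpreted as column-major
  // is its transpose, so swapping the operands and the M/N dimensions leaves
  // the row-major result in C.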
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemm(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
N));
}
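// Example (sketch) of calling the float Gemm above for a row-major product
// C = A * B with A of shape 2x3 and B of shape 3x4; d_A, d_B and d_C are
// assumed to be float device buffers of the right sizes and `context` an
// initialized CUDAContext:
//
//   caffe2::math::Gemm<float, CUDAContext>(
//       CblasNoTrans, CblasNoTrans, /* M */ 2, /* N */ 4, /* K */ 3,
//       1.0f, d_A, d_B, 0.0f, d_C, &context);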
template <>
CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const at::Half* B,
const float beta,
at::Half* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemmEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
CUDA_R_16F,
ldb,
A,
CUDA_R_16F,
lda,
&beta,
C,
CUDA_R_16F,
N));
} else if (math_type == TensorProto_DataType_FLOAT16) {
// convert alpha, beta from float -> __half
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
// call cublasHgemm
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasHgemm(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
(const __half*)A,
lda,
&beta_fp16,
(__half*)C,
N));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
template <>
CAFFE2_CUDA_EXPORT void BiasCHW<float, CUDAContext>(
const float* bias,
const float* bias_multiplier,
const int bias_channels,
const int image_size,
float* image,
CUDAContext* context) {
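  // Adding a per-channel bias over the whole image is expressed as a rank-1
  // update: the (bias_channels x 1) bias times the (1 x image_size) all-ones
  // bias_multiplier is accumulated into the (bias_channels x image_size)
  // image (beta = 1).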
Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
bias_channels,
image_size,
1,
1,
bias,
bias_multiplier,
1,
image,
context);
}
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float** A,
const float** B,
const float beta,
float** C,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<float, CUDAContext>(
trans_A,
trans_B,
M,
N,
K,
alpha,
A[i],
B[i],
beta,
C[i],
context,
math_type);
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
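  // cublasSgemmBatched takes device-side arrays of matrix pointers, so the
  // host pointer arrays A, B and C are staged into device memory via
  // thrust::device_vector for the duration of the call.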
thrust::device_vector<const float*> A_device(A, A + batch_size);
thrust::device_vector<const float*> B_device(B, B + batch_size);
thrust::device_vector<float*> C_device(C, C + batch_size);
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemmBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B_device.data().get(),
ldb,
A_device.data().get(),
lda,
&beta,
C_device.data().get(),
ldc,
batch_size));
#endif
}
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int A_stride,
const float* B,
const int B_stride,
const float beta,
float* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<float, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
A += A_stride;
B += B_stride;
C += C_stride;
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemmStridedBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
ldb,
B_stride,
A,
lda,
A_stride,
&beta,
C,
ldc,
C_stride,
batch_size));
#endif
}
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const at::Half** A,
const at::Half** B,
const float beta,
at::Half** C,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 9
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A,
trans_B,
M,
N,
K,
alpha,
A[i],
B[i],
beta,
C[i],
context,
math_type);
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
#if CUDA_VERSION < 9010
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A,
trans_B,
M,
N,
K,
alpha,
A[i],
B[i],
beta,
C[i],
context,
math_type);
}
#else
thrust::device_vector<const void*> A_device(A, A + batch_size);
thrust::device_vector<const void*> B_device(B, B + batch_size);
thrust::device_vector<void*> C_device(C, C + batch_size);
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasGemmBatchedEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B_device.data().get(),
CUDA_R_16F,
ldb,
A_device.data().get(),
CUDA_R_16F,
lda,
&beta,
C_device.data().get(),
CUDA_R_16F,
ldc,
batch_size,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#endif
} else if (math_type == TensorProto_DataType_FLOAT16) {
// Convert alpha, beta from float -> __half
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
std::vector<const __half*> A_array(batch_size);
std::vector<const __half*> B_array(batch_size);
std::vector<__half*> C_array(batch_size);
for (int i = 0; i < batch_size; ++i) {
A_array[i] = reinterpret_cast<const __half*>(A[i]);
B_array[i] = reinterpret_cast<const __half*>(B[i]);
C_array[i] = reinterpret_cast<__half*>(C[i]);
}
thrust::device_vector<const __half*> A_device(
A_array.cbegin(), A_array.cend());
thrust::device_vector<const __half*> B_device(
B_array.cbegin(), B_array.cend());
thrust::device_vector<__half*> C_device(C_array.cbegin(), C_array.cend());
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasHgemmBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha_fp16,
B_device.data().get(),
ldb,
A_device.data().get(),
lda,
&beta_fp16,
C_device.data().get(),
ldc,
batch_size));
} else {
CAFFE_THROW("Unsupported math type");
}
#endif
}
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const int A_stride,
const at::Half* B,
const int B_stride,
const float beta,
at::Half* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
A += A_stride;
B += B_stride;
C += C_stride;
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
#if CUDA_VERSION < 9010
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
A += A_stride;
B += B_stride;
C += C_stride;
}
#else
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasGemmStridedBatchedEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
CUDA_R_16F,
ldb,
B_stride,
A,
CUDA_R_16F,
lda,
A_stride,
&beta,
C,
CUDA_R_16F,
ldc,
C_stride,
batch_size,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#endif
} else if (math_type == TensorProto_DataType_FLOAT16) {
// Convert alpha, beta from float -> __half
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasHgemmStridedBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
B_stride,
(const __half*)A,
lda,
A_stride,
&beta_fp16,
(__half*)C,
ldc,
C_stride,
batch_size));
} else {
CAFFE_THROW("Unsupported math type");
}
#endif
}
#if CUDA_VERSION >= 9000
// No change relative to the default implementation, but the specialization is
// required; defer to the default CUDA engine.
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
return Gemm<float, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
}
template <>
CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const at::Half* B,
const float beta,
at::Half* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
// enable TensorCore for this call on this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_TENSOR_OP_MATH));
}
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasGemmEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
CUDA_R_16F,
ldb,
A,
CUDA_R_16F,
lda,
&beta,
C,
CUDA_R_16F,
N,
CUDA_R_32F,
CUBLAS_GEMM_DFALT_TENSOR_OP));
// Now disable TensorCore math for subsequent calls to this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_DEFAULT_MATH));
}
}
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int A_stride,
const float* B,
const int B_stride,
const float beta,
float* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
return GemmStridedBatched<float, CUDAContext, DefaultEngine>(
trans_A,
trans_B,
batch_size,
M,
N,
K,
alpha,
A,
A_stride,
B,
B_stride,
beta,
C,
C_stride,
context,
math_type);
}
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<at::Half, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const int A_stride,
const at::Half* B,
const int B_stride,
const float beta,
at::Half* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
return GemmStridedBatched<at::Half, CUDAContext, DefaultEngine>(
trans_A,
trans_B,
batch_size,
M,
N,
K,
alpha,
A,
A_stride,
B,
B_stride,
beta,
C,
C_stride,
context,
math_type);
}
#endif // CUDA_VERSION >= 9000
template <>
CAFFE2_CUDA_EXPORT void GemmEx<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int lda,
const float* B,
const int ldb,
const float beta,
float* C,
const int ldc,
CUDAContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemm(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
ldc));
}
template <>
CAFFE2_CUDA_EXPORT void Gemv<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const int M,
const int N,
const float alpha,
const float* A,
const float* x,
const float beta,
float* y,
CUDAContext* context,
TensorProto::DataType math_type) {
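  // A is row-major, so a non-transposed row-major GEMV corresponds to a
  // transposed column-major GEMV (and vice versa); hence CblasNoTrans maps to
  // CUBLAS_OP_T and the dimensions are passed to cuBLAS as (N, M).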
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemv(
context->cublas_handle(),
cu_trans_A,
N,
M,
&alpha,
A,
N,
x,
1,
&beta,
y,
1));
}
// Batched Add variants
namespace {
template <typename T>
__global__ void AddStripedBatchKernel(
const int N,
const T* first,
T* Y,
const int stripe,
const int batch) {
for (int j = 0; j < batch; j++) {
const T* x = first + j * stripe;
CUDA_1D_KERNEL_LOOP(i, N) {
float tmpY = convert::To<T, float>(Y[i]);
tmpY += convert::To<T, float>(x[i]);
Y[i] = convert::To<float, T>(tmpY);
}
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \
template <> \
CAFFE2_CUDA_EXPORT void AddStripedBatch<T, CUDAContext>( \
const int N, \
const T* first, \
T* Y, \
const int stripe, \
const int batch, \
CUDAContext* context) { \
AddStripedBatchKernel<T> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, first, Y, stripe, batch); \
}
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float);
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(at::Half);
#undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH
template <>
CAFFE2_CUDA_EXPORT void Gemv<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const int M,
const int N,
const float alpha,
const at::Half* A,
const at::Half* x,
const float beta,
at::Half* y,
CUDAContext* context,
TensorProto::DataType math_type) {
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
// sort out what we need to call cublasSgemmEx / cublasHgemm
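  // cuBLAS does not expose a half-precision gemv, so the matrix-vector
  // product is issued as a GEMM with a single column (n = 1), using
  // cublasSgemmEx for fp32 accumulation or cublasHgemm for pure fp16 math.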
const int m = (cu_trans_A == CUBLAS_OP_N) ? N : M;
const int k = (cu_trans_A == CUBLAS_OP_N) ? M : N;
const int lda = (cu_trans_A == CUBLAS_OP_N) ? m : k;
const int ldc = m;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemmEx(
context->cublas_handle(),
cu_trans_A,
CUBLAS_OP_N,
m,
1,
k,
&alpha,
A,
CUDA_R_16F,
lda,
x,
CUDA_R_16F,
k,
&beta,
y,
CUDA_R_16F,
ldc));
} else if (math_type == TensorProto_DataType_FLOAT16) {
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasHgemm(
context->cublas_handle(),
cu_trans_A,
CUBLAS_OP_N,
m,
1,
k,
&alpha_fp16,
(const __half*)A,
lda,
(const __half*)x,
k,
&beta_fp16,
(__half*)y,
ldc));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
namespace {
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = alpha;
}
}
} // namespace
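// Set fills Y with alpha. When alpha is zero the fill is done with
// cudaMemsetAsync, which is valid for these arithmetic types because the
// all-zero byte pattern represents zero; otherwise SetKernel is launched.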
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
CAFFE2_CUDA_API void Set<T, CUDAContext>( \
const size_t N, const T alpha, T* Y, CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (alpha == T(0)) { \
cudaMemsetAsync(Y, 0, sizeof(T) * N, context->cuda_stream()); \
} else { \
SetKernel<T> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, alpha, Y); \
} \
}
CAFFE2_SPECIALIZED_CUDA_SET(float);
CAFFE2_SPECIALIZED_CUDA_SET(double);
CAFFE2_SPECIALIZED_CUDA_SET(bool);
CAFFE2_SPECIALIZED_CUDA_SET(int8_t);
CAFFE2_SPECIALIZED_CUDA_SET(int16_t);
CAFFE2_SPECIALIZED_CUDA_SET(int);
CAFFE2_SPECIALIZED_CUDA_SET(int64_t);
CAFFE2_SPECIALIZED_CUDA_SET(char);
CAFFE2_SPECIALIZED_CUDA_SET(uint8_t);
CAFFE2_SPECIALIZED_CUDA_SET(uint16_t);
#undef CAFFE2_SPECIALIZED_CUDA_SET
template <>
CAFFE2_CUDA_EXPORT void Set<at::Half, CUDAContext>(
const size_t N,
const at::Half alpha,
at::Half* Y,
CUDAContext* context) {
if (N > 0) {
SetKernel<at::Half>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, alpha, Y);
}
}
namespace {
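// curandGenerateUniform produces values in (0, 1], so UniformShift rescales
// them onto (min, max]. Note that the bounds are passed as float and the
// arithmetic is done in float even for the double specialization.
// UniformIntFit maps raw 32-bit draws onto [min, max] with a modulo, which is
// slightly biased unless the range evenly divides 2^32.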
template <typename T>
__global__ void
UniformShift(const size_t N, const float min, const float max, T* x) {
float scale = max - min;
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min);
}
}
__global__ void
UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) {
int* x_int = reinterpret_cast<int*>(x);
int range = (max - min + 1);
CUDA_1D_KERNEL_LOOP(i, N) {
x_int[i] = min + static_cast<int>(x[i] % range);
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void RandUniform<float, CUDAContext>(
const size_t n,
const float min,
const float max,
float* r,
CUDAContext* context) {
CURAND_ENFORCE(curandGenerateUniform(context->curand_generator(), r, n));
UniformShift<float>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, min, max, r);
}
template <>
CAFFE2_CUDA_EXPORT void RandUniform<double, CUDAContext>(
const size_t n,
const double min,
const double max,
double* r,
CUDAContext* context) {
CURAND_ENFORCE(
curandGenerateUniformDouble(context->curand_generator(), r, n));
UniformShift<double>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, min, max, r);
}
template <>
CAFFE2_CUDA_EXPORT void RandUniform<int, CUDAContext>(
const size_t n,
const int min,
const int max,
int* r,
CUDAContext* context) {
CURAND_ENFORCE(curandGenerate(
context->curand_generator(), reinterpret_cast<unsigned int*>(r), n));
UniformIntFit<<<
CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
n, min, max, reinterpret_cast<unsigned int*>(r));
}
template <typename T>
size_t HandleOddLengthRandGaussian(
const size_t n,
const T mean,
const T std,
T* r,
CUDAContext* context) {
if (n % 2 == 1) {
std::default_random_engine generator;
std::normal_distribution<T> distribution(mean, std);
const T random_value = distribution(generator);
Set<T, CUDAContext>(1, random_value, r + (n - 1), context);
return n - 1;
}
return n;
}
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<float, CUDAContext>(
const size_t n,
const float mean,
const float std,
float* r,
CUDAContext* context) {
// If n is odd, we add a random Gaussian value at the end manually
// and generate n-1 random values using curandGenerateNormal.
// curandGenerateNormal requires n to be even.
const size_t even_n =
HandleOddLengthRandGaussian<float>(n, mean, std, r, context);
CURAND_ENFORCE(
curandGenerateNormal(context->curand_generator(), r, even_n, mean, std));
}
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<double, CUDAContext>(
const size_t n,
const double mean,
const double std,
double* r,
CUDAContext* context) {
const size_t even_n =
HandleOddLengthRandGaussian<double>(n, mean, std, r, context);
CURAND_ENFORCE(curandGenerateNormalDouble(
context->curand_generator(), r, even_n, mean, std));
}
template <>
CAFFE2_CUDA_EXPORT void Dot<float, CUDAContext>(
const int n,
const float* a,
const float* b,
float* y,
CUDAContext* context) {
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(cublasSdot(context->cublas_handle(), n, a, 1, b, 1, y));
}
template <>
CAFFE2_CUDA_EXPORT void Dot<at::Half, CUDAContext>(
const int n,
const at::Half* a,
const at::Half* b,
at::Half* y,
CUDAContext* context) {
// execute with 32-bit math
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(cublasDotEx(
context->cublas_handle(),
n,
a,
CUDA_R_16F,
1,
b,
CUDA_R_16F,
1,
y,
CUDA_R_16F,
CUDA_R_32F));
}
// A previous version of caffe2 used Thrust, but Thrust reductions perform an
// implicit scratch-space allocation and deallocation, which may interfere
// with NCCL and create a deadlock. Hence we use a custom reduction here.
#define SUM_KERNEL_NTHREADS 128
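// SumKernel is launched as a single block of SUM_KERNEL_NTHREADS threads:
// each thread accumulates a strided partial sum (optionally of squares) into
// shared memory, and the 128 partials are then reduced 128 -> 32 -> 1 with
// __syncthreads() between the stages.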
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y, bool square) {
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SUM_KERNEL_NTHREADS];
reduction_buffer[idx] = 0;
// A multilevel reduction.
// N -> 128
if (!square) {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
reduction_buffer[idx] += convert::To<T, float>(X[i]);
}
} else {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
float Xi = convert::To<T, float>(X[i]);
reduction_buffer[idx] += Xi * Xi;
}
}
__syncthreads();
// 128 -> 32
if (idx < 32) {
reduction_buffer[idx] += reduction_buffer[idx + 32] +
reduction_buffer[idx + 64] + reduction_buffer[idx + 96];
}
__syncthreads();
// 32 -> 1
if (idx == 0) {
float tmp = 0;
for (int i = 0; i < 32; ++i) {
tmp += reduction_buffer[i];
}
*Y = convert::To<float, T>(tmp);
}
}
// According to the benchmarks script
// caffe2/caffe2/experiments/python/device_reduce_sum_bench.py,
// device reduce is slower for N <= 10000.
#define DEVICE_REDUCE_SIZE_THRESHOLD 10000
namespace {
template <typename T>
__global__ void SumConvertKernel(float* sum, T* dest) {
*dest = convert::To<float, T>(*sum);
}
template <typename T, typename IterT>
CAFFE2_CUDA_EXPORT void SumGenericIter(
const int N,
IterT it,
T*& dest,
CUDAContext* context,
Tensor* scratch_ptr) {
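  // Two-pass cub::DeviceReduce::Sum: query the workspace size, resize the
  // scratch tensor, then reduce. When dest is null, one extra element is
  // appended to the scratch allocation and used as the output, so callers do
  // not have to provide device memory for the result.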
size_t memRequired = 0;
cub::DeviceReduce::Sum(
nullptr, memRequired, it, dest, N, context->cuda_stream());
auto buffer_size =
static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T));
if (!dest) {
// allocate one more T at the end of scratch for dest
scratch_ptr->Resize(std::vector<TIndex>{buffer_size + 1});
dest = scratch_ptr->template mutable_data<T>() + buffer_size;
} else {
scratch_ptr->Resize(std::vector<TIndex>{buffer_size});
}
cub::DeviceReduce::Sum(
static_cast<void*>(scratch_ptr->template mutable_data<T>()),
memRequired,
it,
dest,
N,
context->cuda_stream());
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Sum<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<float>(N, x, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, false);
}
}
template <>
CAFFE2_CUDA_EXPORT void Sum<int32_t, CUDAContext>(
const int N,
const int32_t* x,
int32_t* y,
CUDAContext* context,
Tensor* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<int32_t>(N, x, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, false);
}
}
namespace {
template <typename T>
struct FloatTransform {
inline __host__ __device__ float operator()(const T v) const {
return convert::To<T, float>(v);
}
};
} // namespace
#define CAFFE2_MATH_SUM_FUNC(T) \
template <> \
CAFFE2_CUDA_EXPORT void Sum<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> transform; \
cub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \
x, transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \
} else { \
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \
N, x, y, false); \
} \
}
CAFFE2_MATH_SUM_FUNC(at::Half)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
template <typename T>
struct SqrTransform {
inline __host__ __device__ T operator()(const T v) const {
return v * v;
}
};
} // namespace
template <>
CAFFE2_CUDA_EXPORT void SumSqr<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SqrTransform<float> transform;
cub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
x, transform);
SumGenericIter<float>(N, it, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, true);
}
}
#define CAFFE2_MATH_SUMSQR_FUNC(T) \
template <> \
CAFFE2_CUDA_EXPORT void SumSqr<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> float_transform; \
cub::TransformInputIterator<float, FloatTransform<T>, const T*> \
float_it(x, float_transform); \
SqrTransform<float> sqr_transform; \
cub::TransformInputIterator< \
float, \
SqrTransform<float>, \
decltype(float_it)> \
it(float_it, sqr_transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \
} else { \
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \
N, x, y, true); \
} \
}
CAFFE2_MATH_SUMSQR_FUNC(at::Half)
#undef CAFFE2_MATH_SUMSQR_FUNC
#undef DEVICE_REDUCE_SIZE_THRESHOLD
namespace {
template <typename T>
__global__ void
SelectKernel(const int N, const int D, const T* x, const int* idx, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i * D + idx[i]];
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Select<float, CUDAContext>(
const int N,
const int D,
const float* x,
const int* idx,
float* y,
CUDAContext* context) {
SelectKernel<float>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, D, x, idx, y);
}
template <>
CAFFE2_CUDA_EXPORT void Select<at::Half, CUDAContext>(
const int N,
const int D,
const at::Half* x,
const int* idx,
at::Half* y,
CUDAContext* context) {
SelectKernel<at::Half>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, D, x, idx, y);
}
namespace {
template <typename TAlpha, typename TData>
__global__ void
ScaleCUDAKernel(const int n, const TAlpha alpha, const TData* x, TData* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
#if __CUDA_ARCH__ >= 350
y[i] = __ldg(x + i) * static_cast<TData>(alpha);
#else
y[i] = x[i] * static_cast<TData>(alpha);
#endif
}
}
template <typename TAlpha, typename TData>
__global__ void
ScaleCUDAKernel(const int n, const TAlpha* alpha, const TData* x, TData* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
#if __CUDA_ARCH__ >= 350
y[i] = __ldg(x + i) * static_cast<TData>(__ldg(alpha));
#else
y[i] = x[i] * static_cast<TData>(*alpha);
#endif
}
}
template <typename T>
__global__ void PowKernel(const int n, const T* x, const T exponent, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = powf(x[i], exponent);
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Powx<float, CUDAContext>(
const int N,
const float* a,
const float b,
float* y,
CUDAContext* context) {
PowKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, a, b, y);
}
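// Scale computes y = alpha * x. The cuBLAS-backed variants copy x into y (if
// the buffers differ) and then scale y in place with cublas?scal; the
// host-alpha overload skips the scal call when alpha == 1, while the
// device-alpha overload always issues it, since alpha cannot be inspected on
// the host without a synchronization. The at::Half / mixed-precision
// overloads further below use cublasScalEx with fp32 compute.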
#define DELEGATE_CUBLAS_SCALE_FUNCTION(TAlpha, TData, CuBLASFunc) \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const int N, \
const TAlpha alpha, \
const TData* x, \
TData* y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (x != y) { \
cudaMemcpyAsync( \
y, \
x, \
sizeof(TData) * N, \
cudaMemcpyDeviceToDevice, \
context->cuda_stream()); \
} \
if (alpha != TAlpha(1)) { \
CUBLAS_ENFORCE(cublasSetPointerMode( \
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); \
CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, &alpha, y, 1)); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const int N, \
const TAlpha* alpha, \
const TData* x, \
TData* y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (x != y) { \
cudaMemcpyAsync( \
y, \
x, \
sizeof(TData) * N, \
cudaMemcpyDeviceToDevice, \
context->cuda_stream()); \
} \
CUBLAS_ENFORCE(cublasSetPointerMode( \
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); \
CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, alpha, y, 1)); \
}
DELEGATE_CUBLAS_SCALE_FUNCTION(float, float, cublasSscal)
DELEGATE_CUBLAS_SCALE_FUNCTION(double, double, cublasDscal)
#undef DELEGATE_CUBLAS_SCALE_FUNCTION
#define CAFFE2_SPECIALIZED_CUDA_SCALE(TAlpha, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const int N, \
const TAlpha alpha, \
const TData* x, \
TData* y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (alpha == TAlpha(1)) { \
if (x != y) { \
cudaMemcpyAsync( \
y, \
x, \
sizeof(TData) * N, \
cudaMemcpyDeviceToDevice, \
context->cuda_stream()); \
} \
return; \
} \
ScaleCUDAKernel<TAlpha, TData> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, alpha, x, y); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const int N, \
const TAlpha* alpha, \
const TData* x, \
TData* y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
ScaleCUDAKernel<TAlpha, TData> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, alpha, x, y); \
}
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int32_t, std::int32_t)
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int64_t, std::int64_t)
#undef CAFFE2_SPECIALIZED_CUDA_SCALE
template <>
CAFFE2_CUDA_EXPORT void Scale<at::Half, at::Half, CUDAContext>(
const int N,
const at::Half alpha,
const at::Half* x,
at::Half* y,
CUDAContext* context) {
if (N == 0) {
return;
}
if (x != y) {
cudaMemcpyAsync(
y,
x,
sizeof(at::Half) * N,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasScalEx(
context->cublas_handle(),
N,
&alpha,
CUDA_R_16F,
y,
CUDA_R_16F,
1,
CUDA_R_32F));
}
template <>
CAFFE2_CUDA_EXPORT void Scale<at::Half, at::Half, CUDAContext>(
const int N,
const at::Half* alpha,
const at::Half* x,
at::Half* y,
CUDAContext* context) {
if (N == 0) {
return;
}
if (x != y) {
cudaMemcpyAsync(
y,
x,
sizeof(at::Half) * N,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(cublasScalEx(
context->cublas_handle(),
N,
alpha,
CUDA_R_16F,
y,
CUDA_R_16F,
1,
CUDA_R_32F));
}
template <>
CAFFE2_CUDA_EXPORT void Scale<float, at::Half, CUDAContext>(
const int N,
const float alpha,
const at::Half* x,
at::Half* y,
CUDAContext* context) {
if (N == 0) {
return;
}
if (x != y) {
cudaMemcpyAsync(
y,
x,
sizeof(at::Half) * N,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
if (alpha != 1.0f) {
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasScalEx(
context->cublas_handle(),
N,
&alpha,
CUDA_R_32F,
y,
CUDA_R_16F,
1,
CUDA_R_32F));
}
}
template <>
CAFFE2_CUDA_EXPORT void Scale<float, at::Half, CUDAContext>(
const int N,
const float* alpha,
const at::Half* x,
at::Half* y,
CUDAContext* context) {
if (N == 0) {
return;
}
if (x != y) {
cudaMemcpyAsync(
y,
x,
sizeof(at::Half) * N,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(cublasScalEx(
context->cublas_handle(),
N,
alpha,
CUDA_R_32F,
y,
CUDA_R_16F,
1,
CUDA_R_32F));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>(
const int N,
const float alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<double, CUDAContext>(
const int N,
const float alpha,
const double* X,
double* Y,
CUDAContext* context) {
double alpha_d{alpha};
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(
cublasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<at::Half, CUDAContext>(
const int N,
const float alpha,
const at::Half* X,
at::Half* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasAxpyEx(
context->cublas_handle(),
N,
&alpha,
CUDA_R_32F,
X,
CUDA_R_16F,
1,
Y,
CUDA_R_16F,
1,
CUDA_R_32F));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>(
const int N,
const float* alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(cublasSaxpy(context->cublas_handle(), N, alpha, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<at::Half, CUDAContext>(
const int N,
const float* alpha,
const at::Half* X,
at::Half* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(cublasAxpyEx(
context->cublas_handle(),
N,
alpha,
CUDA_R_32F,
X,
CUDA_R_16F,
1,
Y,
CUDA_R_16F,
1,
CUDA_R_32F));
}
namespace {
template <typename TCoeff, typename TData>
__global__ void AxpbyCUDAKernel(
const int N,
const TCoeff a,
const TData* x,
const TCoeff b,
TData* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
y[i] = __ldg(x + i) * a + y[i] * b;
#else
y[i] = x[i] * a + y[i] * b;
#endif
}
}
template <>
__global__ void AxpbyCUDAKernel<float, at::Half>(
const int N,
const float a,
const at::Half* x,
const float b,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * a +
convert::To<at::Half, float>(y[i]) * b);
}
}
template <typename TCoeff, typename TData>
__global__ void AxpbyCUDAKernel(
const int N,
const TCoeff* a,
const TData* x,
const TCoeff* b,
TData* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
y[i] = __ldg(x + i) * __ldg(a) + y[i] * __ldg(b);
#else
y[i] = x[i] * *a + y[i] * *b;
#endif
}
}
template <>
__global__ void AxpbyCUDAKernel<float, at::Half>(
const int N,
const float* a,
const at::Half* x,
const float* b,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * __ldg(a) +
convert::To<at::Half, float>(y[i]) * __ldg(b));
#else
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * *a +
convert::To<at::Half, float>(y[i]) * *b);
#endif
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_AXPBY(TCoeff, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>( \
const int n, \
const TCoeff a, \
const TData* x, \
const TCoeff b, \
TData* y, \
CUDAContext* context) { \
AxpbyCUDAKernel<TCoeff, TData> \
<<<CAFFE_GET_BLOCKS(n), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(n, a, x, b, y); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>( \
const int n, \
const TCoeff* a, \
const TData* x, \
const TCoeff* b, \
TData* y, \
CUDAContext* context) { \
AxpbyCUDAKernel<TCoeff, TData> \
<<<CAFFE_GET_BLOCKS(n), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(n, a, x, b, y); \
}
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, float)
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, at::Half)
#undef CAFFE2_SPECIALIZED_CUDA_AXPBY
namespace {
template <typename T>
__global__ void Im2ColNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int w_out = index % output_w;
const int h_index = index / output_w;
const int h_out = h_index % output_h;
const int channel_in = h_index / output_h;
const int channel_out = channel_in * kernel_h * kernel_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
const int output_size = output_h * output_w;
T* col_data_ptr =
col_data + (channel_out * output_h + h_out) * output_w + w_out;
const T* img_data_ptr =
img_data + (channel_in * input_h + h_in) * input_w + w_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? __ldg(img_data_ptr + dh * input_w + dw)
: 0;
#else
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? img_data_ptr[dh * input_w + dw]
: 0;
#endif
col_data_ptr += output_size;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
template <typename T>
__global__ void Im2ColNHWCCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_w,
const int channels,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int channel_in = index % channels;
const int w_out = index / channels % output_w;
const int h_out = index / channels / output_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
T* col_data_ptr = col_data +
(h_out * output_w + w_out) * channels * kernel_h * kernel_w +
channel_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? __ldg(img_data + (h * input_w + w) * channels + channel_in)
: 0;
#else
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? img_data[(h * input_w + w) * channels + channel_in]
: 0;
#endif
col_data_ptr += channels;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
template <typename T>
__global__ void Col2ImNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int w = index % input_w + pad_l;
const int h = index / input_w % input_h + pad_t;
const int c = index / (input_h * input_w);
// compute the start and end of the output
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = (h - h_col * stride_h);
int w_k = (w - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int col_data_index =
(((c * patch_h + h_k) * patch_w + w_k) * output_h + h_col) *
output_w +
w_col;
#if __CUDA_ARCH__ >= 350
val += __ldg(col_data + col_data_index);
#else
val += col_data[col_data_index];
#endif
}
}
}
img_data[index] = val;
}
}
template <typename T>
__global__ void Col2ImNHWCCUDAKernel(
const int n,
const int input_w,
const int channels,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int c = index % channels;
const int w = index / channels % input_w + pad_l;
const int h = index / channels / input_w + pad_t;
// compute the start and end of the output
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
const int channels_col = patch_h * patch_w * channels;
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = h - h_col * stride_h;
int w_k = w - w_col * stride_w;
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int c_col = (h_k * patch_w + w_k) * channels + c;
#if __CUDA_ARCH__ >= 350
val += __ldg(
col_data + (h_col * output_w + w_col) * channels_col + c_col);
#else
val += col_data[(h_col * output_w + w_col) * channels_col + c_col];
#endif
}
}
}
img_data[index] = val;
}
}
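// Shared N-D kernel for both directions: with kCol2Im == false it gathers
// image values into the column buffer (writing 0 at padded locations);
// with kCol2Im == true it scatters column values back into the image,
// accumulating overlapping contributions with atomicAdd.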
template <typename T, int N, bool kCol2Im>
__global__ void Im2ColNdNCHWCUDAKernel(
const int outer_size,
const int inner_size,
const int kernel_size,
SimpleArray<int, N + 1> img_shape,
SimpleArray<int, N + 1> col_shape,
SimpleArray<int, N> kernel_shape,
SimpleArray<int, N> stride,
SimpleArray<int, N> dilation,
SimpleArray<int, N> pad,
const T* X_data,
T* Y_data) {
int d_offset[N];
int d_iter[N];
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
int offset_i = i;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_offset[d_i] = offset_i % kernel_shape.data[d_i];
offset_i /= kernel_shape.data[d_i];
}
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int offset_j = j;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_iter[d_i] = offset_j % col_shape.data[d_i + 1];
offset_j /= col_shape.data[d_i + 1];
}
const int col_index = i * inner_size + j;
int img_index = i / kernel_size;
bool is_padding = false;
#pragma unroll
for (int d_i = 0; d_i < N; ++d_i) {
const int d_img = d_iter[d_i] * stride.data[d_i] - pad.data[d_i] +
d_offset[d_i] * dilation.data[d_i];
is_padding |= !utils::IsAGeZeroAndALtB(d_img, img_shape.data[d_i + 1]);
img_index = img_index * img_shape.data[d_i + 1] + d_img;
}
#if __CUDA_ARCH__ >= 350
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : __ldg(X_data + img_index);
} else if (!is_padding) {
atomicAdd(Y_data + img_index, __ldg(X_data + col_index));
}
#else
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : X_data[img_index];
} else if (!is_padding) {
atomicAdd(Y_data + img_index, X_data[col_index]);
}
#endif
}
}
}
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Im2ColNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
Im2ColNdNCHWCUDAKernel<T, N, false>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
img_data,
col_data);
}
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Col2ImNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
Set<T, CUDAContext>(img_size, 0, img_data, context);
Im2ColNdNCHWCUDAKernel<T, N, true>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
col_data,
img_data);
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context,
const int /* groups */) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * output_h * output_w;
Im2ColNCHWCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
img_data,
col_data);
}
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context,
const int groups) {
CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Im2Col");
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = output_h * output_w * channels;
Im2ColNHWCCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_w,
channels,
img_data,
col_data);
}
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context,
const int /* groups */) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * height * width;
Col2ImNCHWCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context,
const int groups) {
CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Col2Im");
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = height * width * channels;
Col2ImNHWCCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
width,
channels,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context) {
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Im2ColNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
img_data,
col_data,
context);
}
template <>
CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context) {
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Col2ImNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
col_data,
img_data,
context);
}
template <>
CAFFE2_CUDA_EXPORT void CopyMatrix<CUDAContext>(
const size_t itemsize,
const int M,
const int N,
const void* A,
const int lda,
void* B,
const int ldb,
CUDAContext* context,
TypeMeta::TypedCopy copy) {
CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context");
cudaMemcpy2DAsync(
B,
ldb * itemsize,
A,
lda * itemsize,
N * itemsize,
M,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
#define CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(T) \
template <> \
void CopyMatrix<T, CUDAContext>( \
const int M, \
const int N, \
const T* A, \
const int lda, \
T* B, \
const int ldb, \
CUDAContext* context) { \
if (M == 0 || N == 0) { \
return; \
} \
cudaMemcpy2DAsync( \
B, \
sizeof(T) * ldb, \
A, \
sizeof(T) * lda, \
sizeof(T) * N, \
M, \
cudaMemcpyDeviceToDevice, \
context->cuda_stream()); \
}
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(float)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(double)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(TIndex)
#undef CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX
template <>
CAFFE2_CUDA_EXPORT void CopyVector<float, CUDAContext>(
const int N,
const float* src,
float* dst,
CUDAContext* context) {
if (src != dst && N > 0) {
cudaMemcpyAsync(
dst,
src,
sizeof(float) * N,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
}
namespace {
template <typename T>
using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
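// Row-wise reduction: blocks grid-stride over rows, threads stride over the
// columns of the current row, partial values are combined with a CUB block
// reduction, and thread 0 writes alpha * result. ColwiseReduceKernel below
// is the column-wise counterpart.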
template <typename T, class Reducer>
__global__ void RowwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
val = reducer(X[i * cols + j], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val * alpha;
}
__syncthreads();
}
}
template <typename T, class Reducer>
__global__ void ColwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < cols; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < rows; j += blockDim.x) {
val = reducer(X[j * cols + i], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val * alpha;
}
__syncthreads();
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T) \
template <> \
CAFFE2_CUDA_EXPORT void RowwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
RowwiseReduceKernel<<< \
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>( \
N, D, cub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX
#define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T) \
template <> \
CAFFE2_CUDA_EXPORT void ColwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
ColwiseReduceKernel<<< \
std::min(D, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>( \
N, D, cub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX
namespace {
__global__ void
maximum_kernel(const int N, const float alpha, const float* x, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = fmaxf(x[i], alpha);
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Maximum(
const int N,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
maximum_kernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, alpha, x, y);
}
namespace {
template <typename T, class Reducer, int D>
__global__ void ReduceTensorCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<FixedDivisor<int>, D> Y_dims,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
Y_dims.data[d].DivMod(Y_index, &Y_index, &r);
X_index += r * X_strides.data[d];
}
#if __CUDA_ARCH__ >= 350
val = reducer(val, __ldg(X + X_index));
#else
val = reducer(val, X[X_index]);
#endif
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val * alpha;
}
__syncthreads();
}
}
template <typename T, class Reducer, int D>
CAFFE2_CUDA_EXPORT void ReduceTensorCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const Reducer& reducer,
const T init,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<FixedDivisor<int>, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
}
ReduceTensorCUDAKernel<T, Reducer, D>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size,
inner_size,
X_strides,
Y_dims,
reducer,
init,
alpha,
X,
Y);
}
template <typename T, class Reducer>
CAFFE2_CUDA_EXPORT void ReduceTensorCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const Reducer& reducer,
const T init,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
std::vector<int> Y_dims_vector(dims, dims + num_dims);
for (int i = 0; i < num_axes; ++i) {
Y_dims_vector[axes[i]] = 1;
}
const int* X_dims = dims;
const int* Y_dims = Y_dims_vector.data();
const int X_size =
std::accumulate(X_dims, X_dims + num_dims, 1, std::multiplies<int>());
const int Y_size =
std::accumulate(Y_dims, Y_dims + num_dims, 1, std::multiplies<int>());
if (X_size == 0) {
Set<T, CUDAContext>(Y_size, alpha * init, Y, context);
return;
}
if (alpha == T(0)) {
Set<T, CUDAContext>(Y_size, T(0), Y, context);
return;
}
if (std::equal(X_dims, X_dims + num_dims, Y_dims)) {
Scale<T, T, CUDAContext>(X_size, alpha, X, Y, context);
return;
}
int rows;
int cols;
if (utils::IsRowwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
RowwiseReduceKernel<T>
<<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(rows, cols, reducer, init, alpha, X, Y);
return;
}
if (utils::IsColwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
ColwiseReduceKernel<T>
<<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(rows, cols, reducer, init, alpha, X, Y);
return;
}
std::vector<int> transpose_axes(num_dims);
utils::ComputeTransposeAxesForReduceOp(
num_dims, num_axes, axes, transpose_axes.data());
const int outer_size = Y_size;
const int inner_size = X_size / Y_size;
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(
num_dims,
ReduceTensorCUDAImpl,
T,
Reducer,
outer_size,
inner_size,
dims,
transpose_axes.data(),
reducer,
init,
alpha,
X,
Y,
context);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(T) \
template <> \
CAFFE2_CUDA_EXPORT void ReduceMin<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
cub::Min(), \
std::numeric_limits<T>::max(), \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(T) \
template <> \
CAFFE2_CUDA_EXPORT void ReduceMax<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
cub::Max(), \
std::numeric_limits<T>::lowest(), \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(T) \
template <> \
CAFFE2_CUDA_EXPORT void ReduceSum<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
cub::Sum(), \
T(0), \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(T) \
template <> \
CAFFE2_CUDA_EXPORT void ReduceMean<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
int scale = 1; \
for (int i = 0; i < num_axes; ++i) { \
scale *= dims[axes[i]]; \
} \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
cub::Sum(), \
T(0), \
alpha / static_cast<T>(scale), \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(float)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN
namespace {
template <typename T, int D>
__global__ void BroadcastCUDAKernel(
const int Y_size,
const SimpleArray<int, D> X_strides,
const SimpleArray<FixedDivisor<int>, D> Y_dims,
const T alpha,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, Y_size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
Y_dims.data[i].DivMod(Y_index_val, &Y_index_val, &d);
X_index += d * X_strides.data[i];
}
#if __CUDA_ARCH__ >= 350
Y[Y_index] = __ldg(X + X_index) * alpha;
#else
Y[Y_index] = X[X_index] * alpha;
#endif
}
}
template <typename T, int D>
CAFFE2_CUDA_EXPORT void BroadcastCUDAImpl(
const int X_ndim,
const int* X_dims,
const int* Y_dims,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides_array;
SimpleArray<FixedDivisor<int>, D> Y_dims_array;
const int d = D - X_ndim;
std::fill(X_strides_array.data, X_strides_array.data + d, 0);
int cur_stride = 1;
for (int i = D - 1; i >= d; --i) {
CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]);
X_strides_array.data[i] = X_dims[i - d] == 1 ? 0 : cur_stride;
cur_stride *= X_dims[i - d];
}
for (int i = 0; i < D; ++i) {
if (Y_dims[i] == 0) {
return;
}
Y_dims_array.data[i] = FixedDivisor<int>(Y_dims[i]);
}
const int Y_size =
std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>());
BroadcastCUDAKernel<T, D>
<<<CAFFE_GET_BLOCKS(Y_size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
Y_size, X_strides_array, Y_dims_array, alpha, X, Y);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T) \
template <> \
CAFFE2_CUDA_EXPORT void Broadcast<T, CUDAContext>( \
const int X_ndim, \
const int* X_dims, \
const int Y_ndim, \
const int* Y_dims, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
CAFFE_ENFORCE_LE(X_ndim, Y_ndim); \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
Y_ndim, \
BroadcastCUDAImpl, \
T, \
X_ndim, \
X_dims, \
Y_dims, \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(float)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(double)
#undef CAFFE2_SPECIALIZED_CUDA_BROADCAST
namespace {
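// Single-pass mean/variance: each kernel accumulates the sum and the sum of
// squares, combines them with CUB block reductions, and computes
// Var[X] = E[X^2] - E[X]^2.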
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
const int rows,
const int cols,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(cols);
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
const int X_index = i * cols + j;
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[i] = mu;
variance[i] = v_val * scale - mu * mu;
}
__syncthreads();
}
}
template <typename T>
__global__ void ColwiseMomentsCUDAKernel(
const int rows,
const int cols,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(rows);
for (int i = blockIdx.x; i < cols; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < rows; j += blockDim.x) {
const int X_index = j * cols + i;
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[i] = mu;
variance[i] = v_val * scale - mu * mu;
}
__syncthreads();
}
}
template <typename T, int D>
__global__ void MomentsCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<FixedDivisor<int>, D> Y_dims,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(inner_size);
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
Y_dims.data[d].DivMod(Y_index, &Y_index, &r);
X_index += r * X_strides.data[d];
}
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[i] = mu;
variance[i] = v_val * scale - mu * mu;
}
__syncthreads();
}
}
template <typename T, int D>
CAFFE2_CUDA_EXPORT void MomentsCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<FixedDivisor<int>, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
}
MomentsCUDAKernel<T, D>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size, inner_size, X_strides, Y_dims, X, mean, variance);
}
template <typename T>
CAFFE2_CUDA_EXPORT void MomentsCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
std::vector<int> Y_dims_vector(dims, dims + num_dims);
for (int i = 0; i < num_axes; ++i) {
Y_dims_vector[axes[i]] = 1;
}
const int* X_dims = dims;
const int* Y_dims = Y_dims_vector.data();
const int X_size =
std::accumulate(X_dims, X_dims + num_dims, 1, std::multiplies<int>());
const int Y_size =
std::accumulate(Y_dims, Y_dims + num_dims, 1, std::multiplies<int>());
if (X_size == 0) {
Set<T, CUDAContext>(Y_size, T(0), mean, context);
Set<T, CUDAContext>(Y_size, T(0), variance, context);
return;
}
if (std::equal(X_dims, X_dims + num_dims, Y_dims)) {
cudaMemcpyAsync(
mean,
X,
sizeof(T) * X_size,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
Set<T, CUDAContext>(Y_size, T(0), variance, context);
return;
}
int rows;
int cols;
if (utils::IsRowwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
RowwiseMomentsCUDAKernel<T>
<<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(rows, cols, X, mean, variance);
return;
}
if (utils::IsColwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
ColwiseMomentsCUDAKernel<T>
<<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(rows, cols, X, mean, variance);
return;
}
std::vector<int> transpose_axes(num_dims);
utils::ComputeTransposeAxesForReduceOp(
num_dims, num_axes, axes, transpose_axes.data());
const int pivot = num_dims - num_axes;
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= dims[transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < num_dims; ++i) {
inner_size *= dims[transpose_axes[i]];
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
num_dims,
MomentsCUDAImpl,
T,
outer_size,
inner_size,
dims,
transpose_axes.data(),
X,
mean,
variance,
context);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_MOMENTS(T) \
template <> \
CAFFE2_CUDA_EXPORT void Moments<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* mean, \
T* variance, \
CUDAContext* context) { \
MomentsCUDA<T>( \
num_dims, dims, num_axes, axes, X, mean, variance, context); \
}
CAFFE2_SPECIALIZED_CUDA_MOMENTS(float)
#undef CAFFE2_SPECIALIZED_CUDA_MOMENTS
namespace {
template <typename T>
__global__ void
InvStdCUDAKernel(const int N, const T epsilon, const T* var, T* inv_std);
#define DELEGATE_INV_STD_KERNEL_FUNCTION(T, Func) \
template <> \
__global__ void InvStdCUDAKernel<T>( \
const int N, const T epsilon, const T* var, T* inv_std) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
inv_std[i] = Func(var[i] + epsilon); \
} \
}
DELEGATE_INV_STD_KERNEL_FUNCTION(float, rsqrtf)
#undef DELEGATE_INV_STD_KERNEL_FUNCTION
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_INV_STD(T) \
template <> \
CAFFE2_CUDA_EXPORT void InvStd<T, CUDAContext>( \
const int N, \
const T epsilon, \
const T* var, \
T* inv_std, \
CUDAContext* context) { \
InvStdCUDAKernel<T> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, epsilon, var, inv_std); \
}
CAFFE2_SPECIALIZED_CUDA_INV_STD(float)
#undef CAFFE2_SPECIALIZED_CUDA_INV_STD
namespace {
template <typename T, int D>
__global__ void TransposeCUDAKernel(
const int size,
const SimpleArray<int, D> X_strides,
const SimpleArray<FixedDivisor<int>, D> Y_dims,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
Y_dims.data[i].DivMod(Y_index_val, &Y_index_val, &d);
X_index += d * X_strides.data[i];
}
#if __CUDA_ARCH__ >= 350
Y[Y_index] = __ldg(X + X_index);
#else
Y[Y_index] = X[X_index];
#endif
}
}
template <typename T, int D>
CAFFE2_CUDA_EXPORT void TransposeCUDAImpl(
const int* dims,
const int* axes,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<FixedDivisor<int>, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
int size = 1;
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
size *= dims[i];
}
TransposeCUDAKernel<T, D>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, X_strides, Y_dims, X, Y);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(T) \
template <> \
CAFFE2_CUDA_EXPORT void Transpose<T, CUDAContext>( \
const int ndim, \
const int* dims, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
if (utils::IsIdentityPermutation(ndim, axes)) { \
const int size = \
std::accumulate(dims, dims + ndim, 1, std::multiplies<int>()); \
context->template Copy<T, CUDAContext, CUDAContext>(size, X, Y); \
return; \
} \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
ndim, TransposeCUDAImpl, T, dims, axes, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(float)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(double)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(TIndex)
#undef CAFFE2_SPECIALIZED_CUDA_TRANSPOSE
namespace {
template <typename T, StorageOrder kOrder>
__global__ void AffineChannelCUDAKernel(
const int size,
const int C,
const int HxW,
const T* X,
const T* scale,
const T* bias,
T* Y) {
CUDA_1D_KERNEL_LOOP(i, size) {
const int c = kOrder == StorageOrder::NCHW ? i / HxW % C : i % C;
#if __CUDA_ARCH__ >= 350
Y[i] = __ldg(scale + c) * __ldg(X + i) + __ldg(bias + c);
#else
Y[i] = scale[c] * X[i] + bias[c];
#endif
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(T, kOrder) \
template <> \
CAFFE2_CUDA_EXPORT void AffineChannel<T, CUDAContext, kOrder>( \
const int N, \
const int C, \
const int HxW, \
const T* X, \
const T* scale, \
const T* bias, \
T* Y, \
CUDAContext* context) { \
const int size = N * C * HxW; \
AffineChannelCUDAKernel<T, kOrder> \
<<<CAFFE_GET_BLOCKS(size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(size, C, HxW, X, scale, bias, Y); \
}
CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(float, StorageOrder::NCHW)
CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(float, StorageOrder::NHWC)
#undef CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL
} // namespace math
} // namespace caffe2
|
5afbaa2338922e1d10b65b88f8b2c65bbb88962b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Device input vector
int *d_a;
// Device output vector
int *d_b;
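// One level of the down-sweep phase of a work-efficient (Blelloch) scan:
// every element whose 1-based index is a multiple of 2^(iteration+1) swaps
// with the partial sum 2^iteration positions to its left and then stores
// the sum of the two; the host is expected to launch one grid per level.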
__global__ void downSweep(int *A, int size, int iteration) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
int aux;
if (!((index + 1) % (1 << (iteration + 1)))) {
aux = A[index - (1<<iteration)];
A[index - (1<<iteration)] = A[index];
A[index] = aux + A[index];
}
}
} | 5afbaa2338922e1d10b65b88f8b2c65bbb88962b.cu | #include "includes.h"
// Device input vector
int *d_a;
// Device output vector
int *d_b;
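// One level of the down-sweep phase of a work-efficient (Blelloch) scan:
// every element whose 1-based index is a multiple of 2^(iteration+1) swaps
// with the partial sum 2^iteration positions to its left and then stores
// the sum of the two; the host is expected to launch one grid per level.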
__global__ void downSweep(int *A, int size, int iteration) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
int aux;
if (!((index + 1) % (1 << (iteration + 1)))) {
aux = A[index - (1<<iteration)];
A[index - (1<<iteration)] = A[index];
A[index] = aux + A[index];
}
}
} |
2c1de207e8a2f190402bc893f039a6044abb27e5.hip | // !!! This is a file automatically generated by hipify!!!
//
// main.cpp
//
//
// Created by Elijah Afanasiev on 25.09.2018.
//
//
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
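// Element-wise vector add: each thread handles the single element at
// position offset + idx, so the same kernel can be reused on per-stream
// chunks of a larger array (see streams_vec_add below).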
__global__ void vectorAddGPU(float *a, float *b, float *c, int N, int offset)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < N)
{
c[offset + idx] = a[offset + idx] + b[offset + idx];
}
}
void sample_vec_add(int size = 1048576)
{
int n = size;
int nBytes = n*sizeof(float);
float *a, *b; // host data
float *c; // results
a = (float *)malloc(nBytes);
b = (float *)malloc(nBytes);
c = (float *)malloc(nBytes);
float *a_d,*b_d,*c_d;
dim3 block(256);
dim3 grid((unsigned int)ceil(n/(float)block.x));
for(int i=0;i<n;i++)
{
a[i] = rand() / (float)RAND_MAX;
b[i] = rand() / (float)RAND_MAX;
c[i] = 0;
}
printf("Allocating device memory on host..\n");
hipMalloc((void **)&a_d,n*sizeof(float));
hipMalloc((void **)&b_d,n*sizeof(float));
hipMalloc((void **)&c_d,n*sizeof(float));
printf("Copying to device..\n");
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipMemcpy(a_d,a,n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(b_d,b,n*sizeof(float), hipMemcpyHostToDevice);
printf("Doing GPU Vector add\n");
hipLaunchKernelGGL(( vectorAddGPU), dim3(grid), dim3(block), 0, 0, a_d, b_d, c_d, n, 0);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("time: %f ms\n", milliseconds);
hipDeviceSynchronize();
hipFree(a_d);
hipFree(b_d);
hipFree(c_d);
}
void streams_vec_add(int size = 1048576)
{
int n = size;
int nBytes = n*sizeof(float);
float *a, *b; // host data
float *c; // results
hipHostMalloc( (void**) &a, n * sizeof(float) ,hipHostMallocDefault );
hipHostMalloc( (void**) &b, n * sizeof(float) ,hipHostMallocDefault );
hipHostMalloc( (void**) &c, n * sizeof(float) ,hipHostMallocDefault );
float *a_d,*b_d,*c_d;
for(int i=0;i<n;i++)
{
a[i] = rand() / (float)RAND_MAX;
b[i] = rand() / (float)RAND_MAX;
c[i] = 0;
}
printf("Allocating device memory on host..\n");
hipMalloc((void **)&a_d,n*sizeof(float));
printf("here\n");
hipMalloc((void **)&b_d,n*sizeof(float));
hipMalloc((void **)&c_d,n*sizeof(float));
printf("Copying to device..\n");
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
printf("Doing GPU Vector add\n");
//vectorAddGPU<<<grid, block>>>(a_d, b_d, c_d, n);
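// Split the work across NbStreams streams: for each chunk the H2D copies,
// the kernel launch and the D2H copy are issued asynchronously on the same
// stream, so transfers of one chunk can overlap compute of another (the
// host buffers are pinned above so the copies can actually run asynchronously).
// Note: StreamSize = n / NbStreams truncates, so when n is not a multiple
// of NbStreams the trailing remainder elements are never processed.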
const int NbStreams = 8;
const int StreamSize = n / NbStreams;
hipStream_t Stream[ NbStreams ];
for ( int i = 0; i < NbStreams; i++ )
hipStreamCreate(&Stream[ i ]);
for ( int i = 0; i < NbStreams; i++ )
{
int Offset = i * StreamSize;
hipMemcpyAsync(&a_d[ Offset ], &a[ Offset ], StreamSize * sizeof(float), hipMemcpyHostToDevice, Stream[ i ]);
hipMemcpyAsync(&b_d[ Offset ], &b[ Offset ], StreamSize * sizeof(float), hipMemcpyHostToDevice, Stream[ i ]);
hipMemcpyAsync(&c_d[ Offset ], &c[ Offset ], StreamSize * sizeof(float), hipMemcpyHostToDevice, Stream[ i ]);
dim3 block(1024);
dim3 grid((StreamSize - 1)/1024 + 1);
hipLaunchKernelGGL(( vectorAddGPU), dim3(grid), dim3(block), 0, 0, a_d, b_d, c_d, StreamSize, Offset);
hipMemcpyAsync(&c[ Offset ], &c_d[ Offset ], StreamSize * sizeof(float), hipMemcpyDeviceToHost, Stream[ i ]);
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("time: %f ms\n", milliseconds);
hipDeviceSynchronize();
hipFree(a_d);
hipFree(b_d);
hipFree(c_d);
hipHostFree(a);
hipHostFree(b);
hipHostFree(c);
}
int main(int argc, char **argv)
{
//sample_vec_add(atoi(argv[1]));
streams_vec_add(atoi(argv[1]));
return 0;
}
| 2c1de207e8a2f190402bc893f039a6044abb27e5.cu | //
// main.cpp
//
//
// Created by Elijah Afanasiev on 25.09.2018.
//
//
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda.h>
#include <cuda_runtime.h>
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
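// Element-wise vector add: each thread handles the single element at
// position offset + idx, so the same kernel can be reused on per-stream
// chunks of a larger array (see streams_vec_add below).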
__global__ void vectorAddGPU(float *a, float *b, float *c, int N, int offset)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < N)
{
c[offset + idx] = a[offset + idx] + b[offset + idx];
}
}
void sample_vec_add(int size = 1048576)
{
int n = size;
int nBytes = n*sizeof(float);
float *a, *b; // host data
float *c; // results
a = (float *)malloc(nBytes);
b = (float *)malloc(nBytes);
c = (float *)malloc(nBytes);
float *a_d,*b_d,*c_d;
dim3 block(256);
dim3 grid((unsigned int)ceil(n/(float)block.x));
for(int i=0;i<n;i++)
{
a[i] = rand() / (float)RAND_MAX;
b[i] = rand() / (float)RAND_MAX;
c[i] = 0;
}
printf("Allocating device memory on host..\n");
cudaMalloc((void **)&a_d,n*sizeof(float));
cudaMalloc((void **)&b_d,n*sizeof(float));
cudaMalloc((void **)&c_d,n*sizeof(float));
printf("Copying to device..\n");
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
cudaMemcpy(a_d,a,n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(b_d,b,n*sizeof(float), cudaMemcpyHostToDevice);
printf("Doing GPU Vector add\n");
vectorAddGPU<<<grid, block>>>(a_d, b_d, c_d, n, 0);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("time: %f ms\n", milliseconds);
cudaDeviceSynchronize();
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
}
void streams_vec_add(int size = 1048576)
{
int n = size;
int nBytes = n*sizeof(float);
float *a, *b; // host data
float *c; // results
cudaHostAlloc( (void**) &a, n * sizeof(float) ,cudaHostAllocDefault );
cudaHostAlloc( (void**) &b, n * sizeof(float) ,cudaHostAllocDefault );
cudaHostAlloc( (void**) &c, n * sizeof(float) ,cudaHostAllocDefault );
float *a_d,*b_d,*c_d;
for(int i=0;i<n;i++)
{
a[i] = rand() / (float)RAND_MAX;
b[i] = rand() / (float)RAND_MAX;
c[i] = 0;
}
printf("Allocating device memory on host..\n");
cudaMalloc((void **)&a_d,n*sizeof(float));
printf("here\n");
cudaMalloc((void **)&b_d,n*sizeof(float));
cudaMalloc((void **)&c_d,n*sizeof(float));
printf("Copying to device..\n");
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
printf("Doing GPU Vector add\n");
//vectorAddGPU<<<grid, block>>>(a_d, b_d, c_d, n);
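// Split the work across NbStreams streams: for each chunk the H2D copies,
// the kernel launch and the D2H copy are issued asynchronously on the same
// stream, so transfers of one chunk can overlap compute of another (the
// host buffers are pinned above so the copies can actually run asynchronously).
// Note: StreamSize = n / NbStreams truncates, so when n is not a multiple
// of NbStreams the trailing remainder elements are never processed.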
const int NbStreams = 8;
const int StreamSize = n / NbStreams;
cudaStream_t Stream[ NbStreams ];
for ( int i = 0; i < NbStreams; i++ )
cudaStreamCreate(&Stream[ i ]);
for ( int i = 0; i < NbStreams; i++ )
{
int Offset = i * StreamSize;
cudaMemcpyAsync(&a_d[ Offset ], &a[ Offset ], StreamSize * sizeof(float), cudaMemcpyHostToDevice, Stream[ i ]);
cudaMemcpyAsync(&b_d[ Offset ], &b[ Offset ], StreamSize * sizeof(float), cudaMemcpyHostToDevice, Stream[ i ]);
cudaMemcpyAsync(&c_d[ Offset ], &c[ Offset ], StreamSize * sizeof(float), cudaMemcpyHostToDevice, Stream[ i ]);
dim3 block(1024);
dim3 grid((StreamSize - 1)/1024 + 1);
vectorAddGPU<<<grid, block>>>(a_d, b_d, c_d, StreamSize, Offset);
cudaMemcpyAsync(&c[ Offset ], &c_d[ Offset ], StreamSize * sizeof(float), cudaMemcpyDeviceToHost, Stream[ i ]);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("time: %f ms\n", milliseconds);
cudaDeviceSynchronize();
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
cudaFreeHost(a);
cudaFreeHost(b);
cudaFreeHost(c);
}
int main(int argc, char **argv)
{
//sample_vec_add(atoi(argv[1]));
streams_vec_add(atoi(argv[1]));
return 0;
}
|
collision_check.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "collision_check_with_path/collision_check.cuh"
#define PI 3.1415926535897
#define arraySize 100
__global__ void print_cuda_kernel_test()
{
printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x);
}
// Run on GPU
__global__ void add(int *a, int *b, int *c) {
*c = *a + *b;
}
__global__ void calculate_collision_cuda_kernel(GridMap* device_grid_map)//PathCandidates* device_candidates)//, GridMap* device_grid_map, int grid_map_width_size, int grid_map_height_size)
{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// PathCandidates single_path = device_candidates[index];
// device_candidates -> points_x_
// GridMap * grid_map_ = (GridMap*)malloc(m_OccupancyGrid_ptr->info.width * sizeof(GridMap));
// for (unsigned int width = 0; width < m_OccupancyGrid_ptr->info.width; width++){
// grid_map_[width].occupied_intensity_ = (int*)malloc(m_OccupancyGrid_ptr->info.height * sizeof(int));
// for (unsigned int height = 0; height < m_OccupancyGrid_ptr->info.height; height++)
// {
// if(m_OccupancyGrid_ptr->data[height * m_OccupancyGrid_ptr->info.width + width] > 0)
// {
// *(grid_map_[width].occupied_intensity_ + height) = 255;
// geometry_msgs::Pose obstacle;
// obstacle.position.x = width * m_OccupancyGrid_ptr->info.resolution + m_OccupancyGrid_ptr->info.resolution /2 + m_OccupancyGrid_ptr->info.origin.position.x;
// obstacle.position.y = height * m_OccupancyGrid_ptr->info.resolution + m_OccupancyGrid_ptr->info.resolution /2 + m_OccupancyGrid_ptr->info.origin.position.y;
// m_Obstacles.push_back(obstacle);
// }
// else
// {
// *(grid_map_[width].occupied_intensity_ + height) = 0;
// }
// }
// }
// for (int k =0; k < m_Obstacles.size(); k++)
// {
// for (int i =0; i < RollOut.size(); i++)
// {
// for (int j = 0; j < RollOut.at(i).size(); j++)
// {
// double x_wpt_ = RollOut.at(i).at(j).pos.x;
// double y_wpt_ = RollOut.at(i).at(j).pos.y;
// double dist_to_obstacle = sqrt(pow(m_Obstacles.at(k).position.x - x_wpt_ ,2) + pow(m_Obstacles.at(k).position.y - y_wpt_,2));
// if (dist_to_obstacle < m_obstacle_radius)
// {
// int once;
// if (once == k)
// continue;
// std::cout << "x: "<<m_Obstacles.at(k).position.x << " y: " << m_Obstacles.at(k).position.y << "dist: "<< dist_to_obstacle<< std::endl;
// visualization_msgs::Marker test_marker;
// once = k;
// }
// }
// }
// }
}
__global__ void addKernel( int *c, const int *a, const int *b )
{
// int i = threadIdx.x;
int i = blockIdx.x ;
if( i < arraySize )
c[i] = a[i] + b[i];
}
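// NOTE: in its current form this function only runs a standalone GPU
// vector-add smoke test on two arraySize-element arrays; the transfers of
// the path candidates / grid map and the launch of
// calculate_collision_cuda_kernel are left commented out below.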
void path_candidates_initialize(
PathCandidates* candidates, int candidates_size,
GridMap* grid_map, int grid_map_width_size, int grid_map_height_size)
{
int a[arraySize];
int b[arraySize];
int c[arraySize];
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
// fill the arrays 'a' and 'b' on the CPU
for( int i = 0 ; i < arraySize ; i++ ) {
a[i] = i;
b[i] = i;
}
// Add vectors in parallel.
// Allocate GPU buffers for three vectors (two input, one output)
hipMalloc((void**)&dev_c, arraySize * sizeof(int));
hipMalloc((void**)&dev_a, arraySize * sizeof(int));
hipMalloc((void**)&dev_b, arraySize * sizeof(int));
// copy the arrays 'a' and 'b' to the GPU
hipMemcpy(dev_a, a, arraySize * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, arraySize * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( addKernel), dim3(arraySize), dim3(1), 0, 0, dev_c, dev_a, dev_b);
hipDeviceSynchronize();
// copy the array 'c' back from the GPU to the CPU
hipMemcpy(&c, dev_c, arraySize * sizeof(int), hipMemcpyDeviceToHost);
// display the results
for( int i = 0 ; i < arraySize ; i++ ) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// free the memory allocated on the GPU
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
// printf("initialize is working, candidates size: %d\n", candidates_size);
// PathCandidates* device_candidates;
// hipMalloc((void**)&device_candidates, candidates_size * sizeof(PathCandidates));
// hipMemcpy(device_candidates, candidates, candidates_size * sizeof(PathCandidates), hipMemcpyHostToDevice);
// printf("1\n");
// for (int i =0; i < candidates_size; i++)
// {
// printf("points size: %d \n" ,candidates[i].points_size_);
// PathCandidates single_path = candidates[i];
// // points_x_
// hipMalloc((void**)&(device_candidates[i].points_x_), single_path.points_size_ * sizeof(double));
// hipMemcpy(device_candidates[i].points_x_, single_path.points_x_, single_path.points_size_ * sizeof(double), hipMemcpyHostToDevice);
// //points_y_
// hipMalloc((void**)&(device_candidates[i].points_y_), single_path.points_size_ * sizeof(double));
// hipMemcpy(device_candidates[i].points_y_, single_path.points_y_, single_path.points_size_ * sizeof(double), hipMemcpyHostToDevice);
// }
// printf("grid size: (%d * %d) \n", grid_map_width_size, grid_map_height_size);
// GridMap* device_grid_map;
// hipMalloc((void**)&device_grid_map, grid_map_width_size * sizeof(GridMap));
// hipMemcpy(device_grid_map, grid_map, grid_map_width_size * sizeof(GridMap), hipMemcpyHostToDevice);
// for (int i =0; i < grid_map_width_size; i++){
// GridMap host_grid_map = grid_map[i];
// //host_grid_map_x
// hipMalloc((void**)&(device_grid_map[i].grid_x_), grid_map_height_size * sizeof(double));
// hipMemcpy(device_grid_map[i].grid_x_, host_grid_map.grid_x_, grid_map_height_size * sizeof(double), hipMemcpyHostToDevice);
// //host_grid_map_y
// hipMalloc((void**)&(device_grid_map[i].grid_y_), grid_map_height_size * sizeof(double));
// hipMemcpy(device_grid_map[i].grid_y_, host_grid_map.grid_y_, grid_map_height_size * sizeof(double), hipMemcpyHostToDevice);
// //intensity
// hipMalloc((void**)&(device_grid_map[i].occupied_intensity_), grid_map_height_size * sizeof(int));
// hipMemcpy(device_grid_map[i].occupied_intensity_, host_grid_map.occupied_intensity_, grid_map_height_size * sizeof(int), hipMemcpyHostToDevice);
// }
// dim3 grid(grid_map_width_size, grid_map_height_size);
// // dim3 block(candidates_size, 100, 100);
// // calculate_collision_cuda_kernel<<<grid, 1>>>(device_grid_map);
// // calculate_collision_cuda_kernel<<<1, 1>>>();//, candidates_size); //, device_grid_map, grid_map_width_size, grid_map_height_size);
// for (int i =0; i < candidates_size; i++){
// //points_x_
// hipMemcpy(candidates[i].points_x_, device_candidates[i].points_x_, candidates[i].points_size_ * sizeof(double), hipMemcpyDeviceToHost);
// //points_y_
// hipMemcpy(candidates[i].points_y_, device_candidates[i].points_y_, candidates[i].points_size_ * sizeof(double), hipMemcpyDeviceToHost);
// }
// for (int i =0; i < grid_map_width_size; i++)
// {
// //host_grid_map_x
// hipMemcpy(grid_map[i].grid_x_, device_grid_map[i].grid_x_, grid_map[i].grid_x_size_ * sizeof(unsigned int), hipMemcpyDeviceToHost);
// //host_grid_map_y
// hipMemcpy(grid_map[i].grid_y_, device_grid_map[i].grid_y_, grid_map[i].grid_y_size_ * sizeof(unsigned int), hipMemcpyDeviceToHost);
// //intensity
// hipMemcpy(grid_map[i].occupied_intensity_, device_grid_map[i].occupied_intensity_, grid_map[i].occupied_intensity_size_ * sizeof(unsigned int), hipMemcpyDeviceToHost);
// }
// // Cleanup
// printf("1\n");
// for (int i =0; i < candidates_size; i++)
// {
// free(candidates[i].points_x_); // candidates[i].points_x_ = (double*)malloc(device_single_path.points_size_ * sizeof(double));
// free(candidates[i].points_y_); // device_single_path.points_y_ = (double*)malloc(device_single_path.points_size_ * sizeof(double));
// }
// free(candidates);
// printf("2\n");
// for (int i =0; i < grid_map_width_size; i++)
// {
// free(grid_map[i].grid_x_);
// free(grid_map[i].grid_y_);
// free(grid_map[i].occupied_intensity_);
// }
// free(grid_map);
// printf("3\n");
// for (int i =0; i < candidates_size; i++)
// {
// hipFree(device_candidates[i].points_x_);
// hipFree(device_candidates[i].points_y_);
// }
// hipFree(device_candidates);
// for (int i =0; i < candidates_size; i++)
// {
// hipFree(device_grid_map[i].grid_x_);
// hipFree(device_grid_map[i].grid_y_);
// hipFree(device_grid_map[i].occupied_intensity_);
// }
// hipFree(device_grid_map);
} | collision_check.cu | #include "collision_check_with_path/collision_check.cuh"
#define PI 3.1415926535897
#define arraySize 100
__global__ void print_cuda_kernel_test()
{
printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x);
}
// Run on GPU
__global__ void add(int *a, int *b, int *c) {
*c = *a + *b;
}
__global__ void calculate_collision_cuda_kernel(GridMap* device_grid_map)//PathCandidates* device_candidates)//, GridMap* device_grid_map, int grid_map_width_size, int grid_map_height_size)
{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// PathCandidates single_path = device_candidates[index];
// device_candidates -> points_x_
// GridMap * grid_map_ = (GridMap*)malloc(m_OccupancyGrid_ptr->info.width * sizeof(GridMap));
// for (unsigned int width = 0; width < m_OccupancyGrid_ptr->info.width; width++){
// grid_map_[width].occupied_intensity_ = (int*)malloc(m_OccupancyGrid_ptr->info.height * sizeof(int));
// for (unsigned int height = 0; height < m_OccupancyGrid_ptr->info.height; height++)
// {
// if(m_OccupancyGrid_ptr->data[height * m_OccupancyGrid_ptr->info.width + width] > 0)
// {
// *(grid_map_[width].occupied_intensity_ + height) = 255;
// geometry_msgs::Pose obstacle;
// obstacle.position.x = width * m_OccupancyGrid_ptr->info.resolution + m_OccupancyGrid_ptr->info.resolution /2 + m_OccupancyGrid_ptr->info.origin.position.x;
// obstacle.position.y = height * m_OccupancyGrid_ptr->info.resolution + m_OccupancyGrid_ptr->info.resolution /2 + m_OccupancyGrid_ptr->info.origin.position.y;
// m_Obstacles.push_back(obstacle);
// }
// else
// {
// *(grid_map_[width].occupied_intensity_ + height) = 0;
// }
// }
// }
// for (int k =0; k < m_Obstacles.size(); k++)
// {
// for (int i =0; i < RollOut.size(); i++)
// {
// for (int j = 0; j < RollOut.at(i).size(); j++)
// {
// double x_wpt_ = RollOut.at(i).at(j).pos.x;
// double y_wpt_ = RollOut.at(i).at(j).pos.y;
// double dist_to_obstacle = sqrt(pow(m_Obstacles.at(k).position.x - x_wpt_ ,2) + pow(m_Obstacles.at(k).position.y - y_wpt_,2));
// if (dist_to_obstacle < m_obstacle_radius)
// {
// int once;
// if (once == k)
// continue;
// std::cout << "x: "<<m_Obstacles.at(k).position.x << " y: " << m_Obstacles.at(k).position.y << "dist: "<< dist_to_obstacle<< std::endl;
// visualization_msgs::Marker test_marker;
// once = k;
// }
// }
// }
// }
}
__global__ void addKernel( int *c, const int *a, const int *b )
{
// int i = threadIdx.x;
int i = blockIdx.x ;
if( i < arraySize )
c[i] = a[i] + b[i];
}
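// NOTE: in its current form this function only runs a standalone GPU
// vector-add smoke test on two arraySize-element arrays; the transfers of
// the path candidates / grid map and the launch of
// calculate_collision_cuda_kernel are left commented out below.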
void path_candidates_initialize(
PathCandidates* candidates, int candidates_size,
GridMap* grid_map, int grid_map_width_size, int grid_map_height_size)
{
int a[arraySize];
int b[arraySize];
int c[arraySize];
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
// fill the arrays 'a' and 'b' on the CPU
for( int i = 0 ; i < arraySize ; i++ ) {
a[i] = i;
b[i] = i;
}
// Add vectors in parallel.
// Allocate GPU buffers for three vectors (two input, one output)
cudaMalloc((void**)&dev_c, arraySize * sizeof(int));
cudaMalloc((void**)&dev_a, arraySize * sizeof(int));
cudaMalloc((void**)&dev_b, arraySize * sizeof(int));
// copy the arrays 'a' and 'b' to the GPU
cudaMemcpy(dev_a, a, arraySize * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, arraySize * sizeof(int), cudaMemcpyHostToDevice);
addKernel<<<arraySize, 1>>>(dev_c, dev_a, dev_b);
cudaDeviceSynchronize();
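// A minimal, hypothetical launch-error check could go here, e.g.:
//   cudaError_t err = cudaGetLastError();
//   if (err != cudaSuccess) printf("addKernel launch failed: %s\n", cudaGetErrorString(err));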
// copy the array 'c' back from the GPU to the CPU
cudaMemcpy(c, dev_c, arraySize * sizeof(int), cudaMemcpyDeviceToHost);
// display the results
for( int i = 0 ; i < arraySize ; i++ ) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// free the memory allocated on the GPU
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
// printf("initialize is working, candidates size: %d\n", candidates_size);
// PathCandidates* device_candidates;
// cudaMalloc((void**)&device_candidates, candidates_size * sizeof(PathCandidates));
// cudaMemcpy(device_candidates, candidates, candidates_size * sizeof(PathCandidates), cudaMemcpyHostToDevice);
// printf("1\n");
// for (int i =0; i < candidates_size; i++)
// {
// printf("points size: %d \n" ,candidates[i].points_size_);
// PathCandidates single_path = candidates[i];
// // points_x_
// cudaMalloc((void**)&(device_candidates[i].points_x_), single_path.points_size_ * sizeof(double));
// cudaMemcpy(device_candidates[i].points_x_, single_path.points_x_, single_path.points_size_ * sizeof(double), cudaMemcpyHostToDevice);
// //points_y_
// cudaMalloc((void**)&(device_candidates[i].points_y_), single_path.points_size_ * sizeof(double));
// cudaMemcpy(device_candidates[i].points_y_, single_path.points_y_, single_path.points_size_ * sizeof(double), cudaMemcpyHostToDevice);
// }
// printf("grid size: (%d * %d) \n", grid_map_width_size, grid_map_height_size);
// GridMap* device_grid_map;
// cudaMalloc((void**)&device_grid_map, grid_map_width_size * sizeof(GridMap));
// cudaMemcpy(device_grid_map, grid_map, grid_map_width_size * sizeof(GridMap), cudaMemcpyHostToDevice);
// for (int i =0; i < grid_map_width_size; i++){
// GridMap host_grid_map = grid_map[i];
// //host_grid_map_x
// cudaMalloc((void**)&(device_grid_map[i].grid_x_), grid_map_height_size * sizeof(double));
// cudaMemcpy(device_grid_map[i].grid_x_, host_grid_map.grid_x_, grid_map_height_size * sizeof(double), cudaMemcpyHostToDevice);
// //host_grid_map_y
// cudaMalloc((void**)&(device_grid_map[i].grid_y_), grid_map_height_size * sizeof(double));
// cudaMemcpy(device_grid_map[i].grid_y_, host_grid_map.grid_y_, grid_map_height_size * sizeof(double), cudaMemcpyHostToDevice);
// //intensity
// cudaMalloc((void**)&(device_grid_map[i].occupied_intensity_), grid_map_height_size * sizeof(int));
// cudaMemcpy(device_grid_map[i].occupied_intensity_, host_grid_map.occupied_intensity_, grid_map_height_size * sizeof(int), cudaMemcpyHostToDevice);
// }
// dim3 grid(grid_map_width_size, grid_map_height_size);
// // dim3 block(candidates_size, 100, 100);
// // calculate_collision_cuda_kernel<<<grid, 1>>>(device_grid_map);
// // calculate_collision_cuda_kernel<<<1, 1>>>();//, candidates_size); //, device_grid_map, grid_map_width_size, grid_map_height_size);
// for (int i =0; i < candidates_size; i++){
// //points_x_
// cudaMemcpy(candidates[i].points_x_, device_candidates[i].points_x_, candidates[i].points_size_ * sizeof(double), cudaMemcpyDeviceToHost);
// //points_y_
// cudaMemcpy(candidates[i].points_y_, device_candidates[i].points_y_, candidates[i].points_size_ * sizeof(double), cudaMemcpyDeviceToHost);
// }
// for (int i =0; i < grid_map_width_size; i++)
// {
// //host_grid_map_x
// cudaMemcpy(grid_map[i].grid_x_, device_grid_map[i].grid_x_, grid_map[i].grid_x_size_ * sizeof(unsigned int), cudaMemcpyDeviceToHost);
// //host_grid_map_y
// cudaMemcpy(grid_map[i].grid_y_, device_grid_map[i].grid_y_, grid_map[i].grid_y_size_ * sizeof(unsigned int), cudaMemcpyDeviceToHost);
// //intensity
// cudaMemcpy(grid_map[i].occupied_intensity_, device_grid_map[i].occupied_intensity_, grid_map[i].occupied_intensity_size_ * sizeof(unsigned int), cudaMemcpyDeviceToHost);
// }
// // Cleanup
// printf("1\n");
// for (int i =0; i < candidates_size; i++)
// {
// free(candidates[i].points_x_); // candidates[i].points_x_ = (double*)malloc(device_single_path.points_size_ * sizeof(double));
// free(candidates[i].points_y_); // device_single_path.points_y_ = (double*)malloc(device_single_path.points_size_ * sizeof(double));
// }
// free(candidates);
// printf("2\n");
// for (int i =0; i < grid_map_width_size; i++)
// {
// free(grid_map[i].grid_x_);
// free(grid_map[i].grid_y_);
// free(grid_map[i].occupied_intensity_);
// }
// free(grid_map);
// printf("3\n");
// for (int i =0; i < candidates_size; i++)
// {
// cudaFree(device_candidates[i].points_x_);
// cudaFree(device_candidates[i].points_y_);
// }
// cudaFree(device_candidates);
// for (int i =0; i < candidates_size; i++)
// {
// cudaFree(device_grid_map[i].grid_x_);
// cudaFree(device_grid_map[i].grid_y_);
// cudaFree(device_grid_map[i].occupied_intensity_);
// }
// cudaFree(device_grid_map);
} |
f7a7e6c2a75b50a949c8f7f35c857a84ad0e196f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "page_decode.cuh"
#include "page_string_utils.cuh"
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/strings/detail/gather.cuh>
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
namespace {
constexpr int preprocess_block_size = 512;
constexpr int decode_block_size = 128;
constexpr int rolling_buf_size = decode_block_size * 2;
constexpr int preproc_buf_size = LEVEL_DECODE_BUF_SIZE;
/**
* @brief Compute the start and end page value bounds for this page
*
* This uses definition and repetition level info to determine the number of valid and null
* values for the page, taking into account skip_rows/num_rows (if set).
*
* @param s The local page info
* @param min_row Row index to start reading at
* @param num_rows Maximum number of rows to read
* @param is_bounds_pg True if this page is clipped
* @param has_repetition True if the schema is nested
* @param decoders Definition and repetition level decoders
* @return pair containing start and end value indexes
* @tparam level_t Type used to store decoded repetition and definition levels
* @tparam rle_buf_size Size of the buffer used when decoding repetition and definition levels
*/
template <typename level_t, int rle_buf_size>
__device__ thrust::pair<int, int> page_bounds(page_state_s* const s,
size_t min_row,
size_t num_rows,
bool is_bounds_pg,
bool has_repetition,
rle_stream<level_t, rle_buf_size>* decoders)
{
using block_reduce = hipcub::BlockReduce<int, preprocess_block_size>;
using block_scan = hipcub::BlockScan<int, preprocess_block_size>;
__shared__ union {
typename block_reduce::TempStorage reduce_storage;
typename block_scan::TempStorage scan_storage;
} temp_storage;
auto const t = threadIdx.x;
// decode batches of level stream data using rle_stream objects and use the results to
// calculate start and end value positions in the encoded string data.
int const max_depth = s->col.max_nesting_depth;
int const max_def = s->nesting_info[max_depth - 1].max_def_level;
// can skip all this if we know there are no nulls
if (max_def == 0 && !is_bounds_pg) {
s->page.num_valids = s->num_input_values;
s->page.num_nulls = 0;
return {0, s->num_input_values};
}
int start_value = 0;
int end_value = s->page.num_input_values;
auto const pp = &s->page;
auto const col = &s->col;
// initialize the stream decoders (requires values computed in setupLocalPageInfo)
auto const def_decode = reinterpret_cast<level_t*>(pp->lvl_decode_buf[level_type::DEFINITION]);
auto const rep_decode = reinterpret_cast<level_t*>(pp->lvl_decode_buf[level_type::REPETITION]);
decoders[level_type::DEFINITION].init(s->col.level_bits[level_type::DEFINITION],
s->abs_lvl_start[level_type::DEFINITION],
s->abs_lvl_end[level_type::DEFINITION],
preproc_buf_size,
def_decode,
s->page.num_input_values);
// only need repetition if this is a bounds page. otherwise all we need is def level info
// to count the nulls.
if (has_repetition && is_bounds_pg) {
decoders[level_type::REPETITION].init(s->col.level_bits[level_type::REPETITION],
s->abs_lvl_start[level_type::REPETITION],
s->abs_lvl_end[level_type::REPETITION],
preproc_buf_size,
rep_decode,
s->page.num_input_values);
}
int processed = 0;
// if this is a bounds page, we need to do extra work to find the start and/or end value index
if (is_bounds_pg) {
__shared__ int skipped_values;
__shared__ int skipped_leaf_values;
__shared__ int last_input_value;
__shared__ int end_val_idx;
// need these for skip_rows case
auto const page_start_row = col->start_row + pp->chunk_row;
auto const max_row = min_row + num_rows;
auto const begin_row = page_start_row >= min_row ? 0 : min_row - page_start_row;
auto const max_page_rows = pp->num_rows - begin_row;
auto const page_rows = page_start_row + begin_row + max_page_rows <= max_row
? max_page_rows
: max_row - (page_start_row + begin_row);
auto end_row = begin_row + page_rows;
int row_fudge = -1;
// short circuit for no nulls
if (max_def == 0 && !has_repetition) {
if (t == 0) {
pp->num_nulls = 0;
pp->num_valids = end_row - begin_row;
}
return {begin_row, end_row};
}
int row_count = 0;
int leaf_count = 0;
bool skipped_values_set = false;
bool end_value_set = false;
while (processed < s->page.num_input_values) {
thread_index_type start_val = processed;
if (has_repetition) {
decoders[level_type::REPETITION].decode_next(t);
__syncthreads();
// special case where page does not begin at a row boundary
if (processed == 0 && rep_decode[0] != 0) {
if (t == 0) {
skipped_values = 0;
skipped_leaf_values = 0;
}
skipped_values_set = true;
end_row++; // need to finish off the previous row
row_fudge = 0;
}
}
// the # of rep/def levels will always be the same size
processed += decoders[level_type::DEFINITION].decode_next(t);
__syncthreads();
// do something with the level data
while (start_val < processed) {
auto const idx_t = start_val + t;
auto const idx = rolling_index<preproc_buf_size>(idx_t);
// get absolute thread row index
int is_new_row = idx_t < processed && (!has_repetition || rep_decode[idx] == 0);
int thread_row_count, block_row_count;
block_scan(temp_storage.scan_storage)
.InclusiveSum(is_new_row, thread_row_count, block_row_count);
__syncthreads();
// get absolute thread leaf index
int const is_new_leaf = idx_t < processed && (def_decode[idx] >= max_def);
int thread_leaf_count, block_leaf_count;
block_scan(temp_storage.scan_storage)
.InclusiveSum(is_new_leaf, thread_leaf_count, block_leaf_count);
__syncthreads();
// if we have not set skipped values yet, see if we found the first in-bounds row
if (!skipped_values_set && row_count + block_row_count > begin_row) {
// if this thread is in row bounds
int const row_index = thread_row_count + row_count - 1;
int const in_row_bounds =
idx_t < processed && (row_index >= begin_row) && (row_index < end_row);
int local_count, global_count;
block_scan(temp_storage.scan_storage)
.InclusiveSum(in_row_bounds, local_count, global_count);
__syncthreads();
// we found it
if (global_count > 0) {
// this is the thread that represents the first row. need to test in_row_bounds for
// the case where we only want one row and local_count == 1 for many threads.
if (local_count == 1 && in_row_bounds) {
skipped_values = idx_t;
skipped_leaf_values =
leaf_count + (is_new_leaf ? thread_leaf_count - 1 : thread_leaf_count);
}
skipped_values_set = true;
}
}
// test if row_count will exceed end_row in this batch
if (!end_value_set && row_count + block_row_count >= end_row) {
// if this thread exceeds row bounds. row_fudge change depending on whether we've faked
// the end row to account for starting a page in the middle of a row.
int const row_index = thread_row_count + row_count + row_fudge;
int const exceeds_row_bounds = row_index >= end_row;
int local_count, global_count;
block_scan(temp_storage.scan_storage)
.InclusiveSum(exceeds_row_bounds, local_count, global_count);
__syncthreads();
// we found it
if (global_count > 0) {
// this is the thread that represents the end row.
if (local_count == 1) {
last_input_value = idx_t;
end_val_idx = leaf_count + (is_new_leaf ? thread_leaf_count - 1 : thread_leaf_count);
}
end_value_set = true;
break;
}
}
row_count += block_row_count;
leaf_count += block_leaf_count;
start_val += preprocess_block_size;
}
__syncthreads();
if (end_value_set) { break; }
}
start_value = skipped_values_set ? skipped_leaf_values : 0;
end_value = end_value_set ? end_val_idx : leaf_count;
if (t == 0) {
int const v0 = skipped_values_set ? skipped_values : 0;
int const vn = end_value_set ? last_input_value : s->num_input_values;
int const total_values = vn - v0;
int const total_leaf_values = end_value - start_value;
int const num_nulls = total_values - total_leaf_values;
pp->num_nulls = num_nulls;
pp->num_valids = total_leaf_values;
}
}
// already filtered out unwanted pages, so need to count all non-null values in this page
else {
int num_nulls = 0;
while (processed < s->page.num_input_values) {
thread_index_type start_val = processed;
processed += decoders[level_type::DEFINITION].decode_next(t);
__syncthreads();
while (start_val < processed) {
auto const idx_t = start_val + t;
if (idx_t < processed) {
auto const idx = rolling_index<preproc_buf_size>(idx_t);
if (def_decode[idx] < max_def) { num_nulls++; }
}
start_val += preprocess_block_size;
}
__syncthreads();
}
int const null_count = block_reduce(temp_storage.reduce_storage).Sum(num_nulls);
if (t == 0) {
pp->num_nulls = null_count;
pp->num_valids = pp->num_input_values - null_count;
}
__syncthreads();
end_value -= pp->num_nulls;
}
return {start_value, end_value};
}
/**
* @brief Compute string size information for dictionary encoded strings.
*
* @param data Pointer to the start of the page data stream
* @param dict_base Pointer to the start of the dictionary
 * @param dict_bits The number of bits used in the dictionary bit packing
* @param dict_size Size of the dictionary in bytes
* @param data_size Size of the page data in bytes
* @param start_value Do not count values that occur before this index
* @param end_value Do not count values that occur after this index
*/
__device__ size_t totalDictEntriesSize(uint8_t const* data,
uint8_t const* dict_base,
int dict_bits,
int dict_size,
int data_size,
int start_value,
int end_value)
{
int const t = threadIdx.x;
uint8_t const* ptr = data;
uint8_t const* const end = data + data_size;
int const bytecnt = (dict_bits + 7) >> 3;
size_t l_str_len = 0; // partial sums across threads
int pos = 0; // current value index in the data stream
int t0 = 0; // thread 0 for this batch
int dict_run = 0;
int dict_val = 0;
while (pos < end_value && ptr <= end) {
if (dict_run <= 1) {
dict_run = (ptr < end) ? get_vlq32(ptr, end) : 0;
if (!(dict_run & 1)) {
// Repeated value
if (ptr + bytecnt <= end) {
int32_t run_val = ptr[0];
if (bytecnt > 1) {
run_val |= ptr[1] << 8;
if (bytecnt > 2) {
run_val |= ptr[2] << 16;
if (bytecnt > 3) { run_val |= ptr[3] << 24; }
}
}
dict_val = run_val & ((1 << dict_bits) - 1);
}
ptr += bytecnt;
}
}
int batch_len;
if (dict_run & 1) {
// Literal batch: must output a multiple of 8, except for the last batch
int batch_len_div8;
batch_len = max(min(preprocess_block_size, (int)(dict_run >> 1) * 8), 1);
batch_len_div8 = (batch_len + 7) >> 3;
dict_run -= batch_len_div8 * 2;
ptr += batch_len_div8 * dict_bits;
} else {
batch_len = dict_run >> 1;
dict_run = 0;
}
int const is_literal = dict_run & 1;
// calculate my thread id for this batch. way to round-robin the work.
int mytid = t - t0;
if (mytid < 0) mytid += preprocess_block_size;
// compute dictionary index.
if (is_literal) {
int dict_idx = 0;
if (mytid < batch_len) {
dict_idx = dict_val;
int32_t ofs = (mytid - ((batch_len + 7) & ~7)) * dict_bits;
const uint8_t* p = ptr + (ofs >> 3);
ofs &= 7;
if (p < end) {
uint32_t c = 8 - ofs;
dict_idx = (*p++) >> ofs;
if (c < dict_bits && p < end) {
dict_idx |= (*p++) << c;
c += 8;
if (c < dict_bits && p < end) {
dict_idx |= (*p++) << c;
c += 8;
if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; }
}
}
dict_idx &= (1 << dict_bits) - 1;
}
if (pos + mytid < end_value) {
uint32_t const dict_pos = (dict_bits > 0) ? dict_idx * sizeof(string_index_pair) : 0;
if (pos + mytid >= start_value && dict_pos < (uint32_t)dict_size) {
const auto* src = reinterpret_cast<const string_index_pair*>(dict_base + dict_pos);
l_str_len += src->second;
}
}
}
t0 += batch_len;
} else {
int const start_off =
(pos < start_value && pos + batch_len > start_value) ? start_value - pos : 0;
batch_len = min(batch_len, end_value - pos);
if (mytid == 0) {
uint32_t const dict_pos = (dict_bits > 0) ? dict_val * sizeof(string_index_pair) : 0;
if (pos + batch_len > start_value && dict_pos < (uint32_t)dict_size) {
const auto* src = reinterpret_cast<const string_index_pair*>(dict_base + dict_pos);
l_str_len += (batch_len - start_off) * src->second;
}
}
t0 += 1;
}
t0 = t0 % preprocess_block_size;
pos += batch_len;
}
__syncthreads();
using block_reduce = hipcub::BlockReduce<size_t, preprocess_block_size>;
__shared__ typename block_reduce::TempStorage reduce_storage;
size_t sum_l = block_reduce(reduce_storage).Sum(l_str_len);
return sum_l;
}
/**
* @brief Compute string size information for plain encoded strings.
*
* @param data Pointer to the start of the page data stream
* @param data_size Length of data
* @param start_value Do not count values that occur before this index
* @param end_value Do not count values that occur after this index
*/
__device__ size_t totalPlainEntriesSize(uint8_t const* data,
int data_size,
int start_value,
int end_value)
{
int const t = threadIdx.x;
int pos = 0;
size_t total_len = 0;
// This step is purely serial
if (!t) {
const uint8_t* cur = data;
int k = 0;
while (pos < end_value && k < data_size) {
int len;
if (k + 4 <= data_size) {
len = (cur[k]) | (cur[k + 1] << 8) | (cur[k + 2] << 16) | (cur[k + 3] << 24);
k += 4;
if (k + len > data_size) { len = 0; }
} else {
len = 0;
}
k += len;
if (pos >= start_value) { total_len += len; }
pos++;
}
}
return total_len;
}
/**
* @brief Kernel for computing string page output size information.
*
* String columns need accurate data size information to preallocate memory in the column buffer to
* store the char data. This calls a kernel to calculate information needed by the string decoding
* kernel. On exit, the `str_bytes`, `num_nulls`, and `num_valids` fields of the PageInfo struct
* are updated. This call ignores non-string columns.
*
* @param pages All pages to be decoded
* @param chunks All chunks to be decoded
 * @param min_row Crop all rows below this row index
* @param num_rows Maximum number of rows to read
* @tparam level_t Type used to store decoded repetition and definition levels
*/
template <typename level_t>
__global__ void __launch_bounds__(preprocess_block_size) gpuComputePageStringSizes(
PageInfo* pages, device_span<ColumnChunkDesc const> chunks, size_t min_row, size_t num_rows)
{
__shared__ __align__(16) page_state_s state_g;
page_state_s* const s = &state_g;
int const page_idx = blockIdx.x;
int const t = threadIdx.x;
PageInfo* const pp = &pages[page_idx];
// reset str_bytes to 0 in case it's already been calculated
if (t == 0) { pp->str_bytes = 0; }
// whether or not we have repetition levels (lists)
bool const has_repetition = chunks[pp->chunk_idx].max_level[level_type::REPETITION] > 0;
// the required number of runs in shared memory we will need to provide the
// rle_stream object
constexpr int rle_run_buffer_size = rle_stream_required_run_buffer_size<preprocess_block_size>();
// the level stream decoders
__shared__ rle_run<level_t> def_runs[rle_run_buffer_size];
__shared__ rle_run<level_t> rep_runs[rle_run_buffer_size];
rle_stream<level_t, preprocess_block_size> decoders[level_type::NUM_LEVEL_TYPES] = {{def_runs},
{rep_runs}};
// setup page info
if (!setupLocalPageInfo(
s, pp, chunks, min_row, num_rows, mask_filter{KERNEL_MASK_STRING}, false)) {
return;
}
if (!t) {
s->page.num_nulls = 0;
s->page.num_valids = 0;
s->page.str_bytes = 0;
}
__syncthreads();
bool const is_bounds_pg = is_bounds_page(s, min_row, num_rows, has_repetition);
// if we're skipping this page anyway, no need to count it
if (!is_bounds_pg && !is_page_contained(s, min_row, num_rows)) { return; }
// find start/end value indices
auto const [start_value, end_value] =
page_bounds(s, min_row, num_rows, is_bounds_pg, has_repetition, decoders);
// need to save num_nulls and num_valids calculated in page_bounds in this page
if (t == 0) {
pp->num_nulls = s->page.num_nulls;
pp->num_valids = s->page.num_valids;
}
auto const& col = s->col;
size_t str_bytes = 0;
// short circuit for FIXED_LEN_BYTE_ARRAY
if ((col.data_type & 7) == FIXED_LEN_BYTE_ARRAY) {
str_bytes = pp->num_valids * s->dtype_len_in;
} else {
// now process string info in the range [start_value, end_value)
// set up for decoding strings...can be either plain or dictionary
uint8_t const* data = s->data_start;
uint8_t const* const end = s->data_end;
uint8_t const* dict_base = nullptr;
int dict_size = 0;
switch (pp->encoding) {
case Encoding::PLAIN_DICTIONARY:
case Encoding::RLE_DICTIONARY:
// RLE-packed dictionary indices, first byte indicates index length in bits
if (col.str_dict_index) {
// String dictionary: use index
dict_base = reinterpret_cast<const uint8_t*>(col.str_dict_index);
dict_size = col.page_info[0].num_input_values * sizeof(string_index_pair);
} else {
dict_base = col.page_info[0].page_data; // dictionary is always stored in the first page
dict_size = col.page_info[0].uncompressed_page_size;
}
// FIXME: need to return an error condition...this won't actually do anything
if (s->dict_bits > 32 || !dict_base) { CUDF_UNREACHABLE("invalid dictionary bit size"); }
str_bytes = totalDictEntriesSize(
data, dict_base, s->dict_bits, dict_size, (end - data), start_value, end_value);
break;
case Encoding::PLAIN:
dict_size = static_cast<int32_t>(end - data);
str_bytes = is_bounds_pg ? totalPlainEntriesSize(data, dict_size, start_value, end_value)
: dict_size - sizeof(int) * pp->num_valids;
break;
}
}
if (t == 0) {
// TODO check for overflow
pp->str_bytes = str_bytes;
}
}
/**
* @brief Kernel for computing the string column data stored in the pages
*
* This function will write the page data and the page data's validity to the
* output specified in the page's column chunk.
*
* This version uses a single warp to do the string copies.
*
* @param pages List of pages
* @param chunks List of column chunks
* @param min_row Row index to start reading at
* @param num_rows Maximum number of rows to read
* @tparam level_t Type used to store decoded repetition and definition levels
*/
template <typename level_t>
__global__ void __launch_bounds__(decode_block_size) gpuDecodeStringPageData(
PageInfo* pages, device_span<ColumnChunkDesc const> chunks, size_t min_row, size_t num_rows)
{
__shared__ __align__(16) page_state_s state_g;
__shared__ __align__(4) size_type last_offset;
__shared__ __align__(16)
page_state_buffers_s<rolling_buf_size, rolling_buf_size, rolling_buf_size>
state_buffers;
page_state_s* const s = &state_g;
auto* const sb = &state_buffers;
int const page_idx = blockIdx.x;
int const t = threadIdx.x;
[[maybe_unused]] null_count_back_copier _{s, t};
if (!setupLocalPageInfo(
s, &pages[page_idx], chunks, min_row, num_rows, mask_filter{KERNEL_MASK_STRING}, true)) {
return;
}
bool const has_repetition = s->col.max_level[level_type::REPETITION] > 0;
// offsets are local to the page
if (t == 0) { last_offset = 0; }
__syncthreads();
int const out_thread0 = s->dict_base && s->dict_bits == 0 ? 32 : 64;
int const leaf_level_index = s->col.max_nesting_depth - 1;
PageNestingDecodeInfo* const nesting_info_base = s->nesting_info;
__shared__ level_t rep[rolling_buf_size]; // circular buffer of repetition level values
__shared__ level_t def[rolling_buf_size]; // circular buffer of definition level values
// skipped_leaf_values will always be 0 for flat hierarchies.
uint32_t skipped_leaf_values = s->page.skipped_leaf_values;
while (!s->error && (s->input_value_count < s->num_input_values || s->src_pos < s->nz_count)) {
int target_pos;
int src_pos = s->src_pos;
if (t < out_thread0) {
target_pos = min(src_pos + 2 * (decode_block_size - out_thread0),
s->nz_count + (decode_block_size - out_thread0));
} else {
target_pos = min(s->nz_count, src_pos + decode_block_size - out_thread0);
if (out_thread0 > 32) { target_pos = min(target_pos, s->dict_pos); }
}
__syncthreads();
if (t < 32) {
// decode repetition and definition levels.
// - update validity vectors
// - updates offsets (for nested columns)
// - produces non-NULL value indices in s->nz_idx for subsequent decoding
gpuDecodeLevels<rolling_buf_size, level_t>(s, sb, target_pos, rep, def, t);
} else if (t < out_thread0) {
// skipped_leaf_values will always be 0 for flat hierarchies.
uint32_t src_target_pos = target_pos + skipped_leaf_values;
// WARP1: Decode dictionary indices, booleans or string positions
if (s->dict_base) {
src_target_pos = gpuDecodeDictionaryIndices<false>(s, sb, src_target_pos, t & 0x1f).first;
} else {
gpuInitStringDescriptors<false>(s, sb, src_target_pos, t & 0x1f);
}
if (t == 32) { *(volatile int32_t*)&s->dict_pos = src_target_pos; }
} else {
int const me = t - out_thread0;
// WARP1..WARP3: Decode values
src_pos += t - out_thread0;
// the position in the output column/buffer
int dst_pos = sb->nz_idx[rolling_index<rolling_buf_size>(src_pos)];
// for the flat hierarchy case we will be reading from the beginning of the value stream,
// regardless of the value of first_row. so adjust our destination offset accordingly.
// example:
// - user has passed skip_rows = 2, so our first_row to output is 2
// - the row values we get from nz_idx will be
// 0, 1, 2, 3, 4 ....
// - by shifting these values by first_row, the sequence becomes
// -1, -2, 0, 1, 2 ...
// - so we will end up ignoring the first two input rows, and input rows 2..n will
// get written to the output starting at position 0.
//
if (!has_repetition) { dst_pos -= s->first_row; }
// need to do this before we branch on src_pos/dst_pos so we don't deadlock
// choose a character parallel string copy when the average string is longer than a warp
using cudf::detail::warp_size;
auto const use_char_ll =
s->page.num_valids > 0 && (s->page.str_bytes / s->page.num_valids) >= warp_size;
if (me < warp_size) {
for (int i = 0; i < decode_block_size - out_thread0; i += warp_size) {
dst_pos = sb->nz_idx[rolling_index<rolling_buf_size>(src_pos + i)];
if (!has_repetition) { dst_pos -= s->first_row; }
auto [ptr, len] = src_pos + i < target_pos && dst_pos >= 0
? gpuGetStringData(s, sb, src_pos + skipped_leaf_values + i)
: cuda::std::pair<char const*, size_t>{nullptr, 0};
__shared__ hipcub::WarpScan<size_type>::TempStorage temp_storage;
size_type offset;
hipcub::WarpScan<size_type>(temp_storage).ExclusiveSum(len, offset);
offset += last_offset;
if (use_char_ll) {
__shared__ __align__(8) uint8_t const* pointers[warp_size];
__shared__ __align__(4) size_type offsets[warp_size];
__shared__ __align__(4) int dsts[warp_size];
__shared__ __align__(4) int lengths[warp_size];
offsets[me] = offset;
pointers[me] = reinterpret_cast<uint8_t const*>(ptr);
dsts[me] = dst_pos;
lengths[me] = len;
__syncwarp();
for (int ss = 0; ss < warp_size && ss + i + s->src_pos < target_pos; ss++) {
if (dsts[ss] >= 0) {
auto offptr =
reinterpret_cast<int32_t*>(nesting_info_base[leaf_level_index].data_out) +
dsts[ss];
*offptr = lengths[ss];
auto str_ptr = nesting_info_base[leaf_level_index].string_out + offsets[ss];
ll_strcpy(str_ptr, pointers[ss], lengths[ss], me);
}
}
} else {
if (src_pos + i < target_pos && dst_pos >= 0) {
auto offptr =
reinterpret_cast<int32_t*>(nesting_info_base[leaf_level_index].data_out) + dst_pos;
*offptr = len;
auto str_ptr = nesting_info_base[leaf_level_index].string_out + offset;
memcpy(str_ptr, ptr, len);
}
__syncwarp();
}
// last thread in warp updates last_offset
if (me == warp_size - 1) { last_offset = offset + len; }
__syncwarp();
}
}
if (t == out_thread0) { *(volatile int32_t*)&s->src_pos = target_pos; }
}
__syncthreads();
}
// now turn array of lengths into offsets
int value_count = nesting_info_base[leaf_level_index].value_count;
// if no repetition we haven't calculated start/end bounds and instead just skipped
// values until we reach first_row. account for that here.
if (!has_repetition) { value_count -= s->first_row; }
auto const offptr = reinterpret_cast<size_type*>(nesting_info_base[leaf_level_index].data_out);
block_excl_sum<decode_block_size>(offptr, value_count, s->page.str_offset);
}
} // anonymous namespace
/**
* @copydoc cudf::io::parquet::gpu::ComputePageStringSizes
*/
void ComputePageStringSizes(cudf::detail::hostdevice_vector<PageInfo>& pages,
cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks,
size_t min_row,
size_t num_rows,
int level_type_size,
rmm::cuda_stream_view stream)
{
dim3 dim_block(preprocess_block_size, 1);
dim3 dim_grid(pages.size(), 1); // 1 threadblock per page
if (level_type_size == 1) {
hipLaunchKernelGGL(( gpuComputePageStringSizes<uint8_t>)
, dim3(dim_grid), dim3(dim_block), 0, stream.value(), pages.device_ptr(), chunks, min_row, num_rows);
} else {
hipLaunchKernelGGL(( gpuComputePageStringSizes<uint16_t>)
, dim3(dim_grid), dim3(dim_block), 0, stream.value(), pages.device_ptr(), chunks, min_row, num_rows);
}
}
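// Editorial note: hipLaunchKernelGGL(kernel, gridDim, blockDim, sharedMemBytes, stream,
// args...) is HIP's portable equivalent of the CUDA <<<grid, block, shmem, stream>>>
// launch syntax used in the original .cu version of this file.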
/**
* @copydoc cudf::io::parquet::gpu::DecodeStringPageData
*/
void __host__ DecodeStringPageData(cudf::detail::hostdevice_vector<PageInfo>& pages,
cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks,
size_t num_rows,
size_t min_row,
int level_type_size,
rmm::cuda_stream_view stream)
{
CUDF_EXPECTS(pages.size() > 0, "There is no page to decode");
dim3 dim_block(decode_block_size, 1);
dim3 dim_grid(pages.size(), 1); // 1 threadblock per page
if (level_type_size == 1) {
hipLaunchKernelGGL(( gpuDecodeStringPageData<uint8_t>)
, dim3(dim_grid), dim3(dim_block), 0, stream.value(), pages.device_ptr(), chunks, min_row, num_rows);
} else {
hipLaunchKernelGGL(( gpuDecodeStringPageData<uint16_t>)
, dim3(dim_grid), dim3(dim_block), 0, stream.value(), pages.device_ptr(), chunks, min_row, num_rows);
}
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
| f7a7e6c2a75b50a949c8f7f35c857a84ad0e196f.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "page_decode.cuh"
#include "page_string_utils.cuh"
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/strings/detail/gather.cuh>
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
namespace {
constexpr int preprocess_block_size = 512;
constexpr int decode_block_size = 128;
constexpr int rolling_buf_size = decode_block_size * 2;
constexpr int preproc_buf_size = LEVEL_DECODE_BUF_SIZE;
/**
* @brief Compute the start and end page value bounds for this page
*
* This uses definition and repetition level info to determine the number of valid and null
* values for the page, taking into account skip_rows/num_rows (if set).
*
* @param s The local page info
* @param min_row Row index to start reading at
* @param num_rows Maximum number of rows to read
* @param is_bounds_pg True if this page is clipped
* @param has_repetition True if the schema is nested
* @param decoders Definition and repetition level decoders
* @return pair containing start and end value indexes
* @tparam level_t Type used to store decoded repetition and definition levels
* @tparam rle_buf_size Size of the buffer used when decoding repetition and definition levels
*/
template <typename level_t, int rle_buf_size>
__device__ thrust::pair<int, int> page_bounds(page_state_s* const s,
size_t min_row,
size_t num_rows,
bool is_bounds_pg,
bool has_repetition,
rle_stream<level_t, rle_buf_size>* decoders)
{
using block_reduce = cub::BlockReduce<int, preprocess_block_size>;
using block_scan = cub::BlockScan<int, preprocess_block_size>;
__shared__ union {
typename block_reduce::TempStorage reduce_storage;
typename block_scan::TempStorage scan_storage;
} temp_storage;
auto const t = threadIdx.x;
// decode batches of level stream data using rle_stream objects and use the results to
// calculate start and end value positions in the encoded string data.
int const max_depth = s->col.max_nesting_depth;
int const max_def = s->nesting_info[max_depth - 1].max_def_level;
// can skip all this if we know there are no nulls
if (max_def == 0 && !is_bounds_pg) {
s->page.num_valids = s->num_input_values;
s->page.num_nulls = 0;
return {0, s->num_input_values};
}
int start_value = 0;
int end_value = s->page.num_input_values;
auto const pp = &s->page;
auto const col = &s->col;
// initialize the stream decoders (requires values computed in setupLocalPageInfo)
auto const def_decode = reinterpret_cast<level_t*>(pp->lvl_decode_buf[level_type::DEFINITION]);
auto const rep_decode = reinterpret_cast<level_t*>(pp->lvl_decode_buf[level_type::REPETITION]);
decoders[level_type::DEFINITION].init(s->col.level_bits[level_type::DEFINITION],
s->abs_lvl_start[level_type::DEFINITION],
s->abs_lvl_end[level_type::DEFINITION],
preproc_buf_size,
def_decode,
s->page.num_input_values);
// only need repetition if this is a bounds page. otherwise all we need is def level info
// to count the nulls.
if (has_repetition && is_bounds_pg) {
decoders[level_type::REPETITION].init(s->col.level_bits[level_type::REPETITION],
s->abs_lvl_start[level_type::REPETITION],
s->abs_lvl_end[level_type::REPETITION],
preproc_buf_size,
rep_decode,
s->page.num_input_values);
}
int processed = 0;
// if this is a bounds page, we need to do extra work to find the start and/or end value index
if (is_bounds_pg) {
__shared__ int skipped_values;
__shared__ int skipped_leaf_values;
__shared__ int last_input_value;
__shared__ int end_val_idx;
// need these for skip_rows case
auto const page_start_row = col->start_row + pp->chunk_row;
auto const max_row = min_row + num_rows;
auto const begin_row = page_start_row >= min_row ? 0 : min_row - page_start_row;
auto const max_page_rows = pp->num_rows - begin_row;
auto const page_rows = page_start_row + begin_row + max_page_rows <= max_row
? max_page_rows
: max_row - (page_start_row + begin_row);
auto end_row = begin_row + page_rows;
int row_fudge = -1;
// short circuit for no nulls
if (max_def == 0 && !has_repetition) {
if (t == 0) {
pp->num_nulls = 0;
pp->num_valids = end_row - begin_row;
}
return {begin_row, end_row};
}
int row_count = 0;
int leaf_count = 0;
bool skipped_values_set = false;
bool end_value_set = false;
while (processed < s->page.num_input_values) {
thread_index_type start_val = processed;
if (has_repetition) {
decoders[level_type::REPETITION].decode_next(t);
__syncthreads();
// special case where page does not begin at a row boundary
if (processed == 0 && rep_decode[0] != 0) {
if (t == 0) {
skipped_values = 0;
skipped_leaf_values = 0;
}
skipped_values_set = true;
end_row++; // need to finish off the previous row
row_fudge = 0;
}
}
// the # of rep/def levels will always be the same size
processed += decoders[level_type::DEFINITION].decode_next(t);
__syncthreads();
// do something with the level data
while (start_val < processed) {
auto const idx_t = start_val + t;
auto const idx = rolling_index<preproc_buf_size>(idx_t);
// get absolute thread row index
int is_new_row = idx_t < processed && (!has_repetition || rep_decode[idx] == 0);
int thread_row_count, block_row_count;
block_scan(temp_storage.scan_storage)
.InclusiveSum(is_new_row, thread_row_count, block_row_count);
__syncthreads();
// get absolute thread leaf index
int const is_new_leaf = idx_t < processed && (def_decode[idx] >= max_def);
int thread_leaf_count, block_leaf_count;
block_scan(temp_storage.scan_storage)
.InclusiveSum(is_new_leaf, thread_leaf_count, block_leaf_count);
__syncthreads();
// if we have not set skipped values yet, see if we found the first in-bounds row
if (!skipped_values_set && row_count + block_row_count > begin_row) {
// if this thread is in row bounds
int const row_index = thread_row_count + row_count - 1;
int const in_row_bounds =
idx_t < processed && (row_index >= begin_row) && (row_index < end_row);
int local_count, global_count;
block_scan(temp_storage.scan_storage)
.InclusiveSum(in_row_bounds, local_count, global_count);
__syncthreads();
// we found it
if (global_count > 0) {
// this is the thread that represents the first row. need to test in_row_bounds for
// the case where we only want one row and local_count == 1 for many threads.
if (local_count == 1 && in_row_bounds) {
skipped_values = idx_t;
skipped_leaf_values =
leaf_count + (is_new_leaf ? thread_leaf_count - 1 : thread_leaf_count);
}
skipped_values_set = true;
}
}
// test if row_count will exceed end_row in this batch
if (!end_value_set && row_count + block_row_count >= end_row) {
// if this thread exceeds row bounds. row_fudge change depending on whether we've faked
// the end row to account for starting a page in the middle of a row.
int const row_index = thread_row_count + row_count + row_fudge;
int const exceeds_row_bounds = row_index >= end_row;
int local_count, global_count;
block_scan(temp_storage.scan_storage)
.InclusiveSum(exceeds_row_bounds, local_count, global_count);
__syncthreads();
// we found it
if (global_count > 0) {
// this is the thread that represents the end row.
if (local_count == 1) {
last_input_value = idx_t;
end_val_idx = leaf_count + (is_new_leaf ? thread_leaf_count - 1 : thread_leaf_count);
}
end_value_set = true;
break;
}
}
row_count += block_row_count;
leaf_count += block_leaf_count;
start_val += preprocess_block_size;
}
__syncthreads();
if (end_value_set) { break; }
}
start_value = skipped_values_set ? skipped_leaf_values : 0;
end_value = end_value_set ? end_val_idx : leaf_count;
if (t == 0) {
int const v0 = skipped_values_set ? skipped_values : 0;
int const vn = end_value_set ? last_input_value : s->num_input_values;
int const total_values = vn - v0;
int const total_leaf_values = end_value - start_value;
int const num_nulls = total_values - total_leaf_values;
pp->num_nulls = num_nulls;
pp->num_valids = total_leaf_values;
}
}
// already filtered out unwanted pages, so need to count all non-null values in this page
else {
int num_nulls = 0;
while (processed < s->page.num_input_values) {
thread_index_type start_val = processed;
processed += decoders[level_type::DEFINITION].decode_next(t);
__syncthreads();
while (start_val < processed) {
auto const idx_t = start_val + t;
if (idx_t < processed) {
auto const idx = rolling_index<preproc_buf_size>(idx_t);
if (def_decode[idx] < max_def) { num_nulls++; }
}
start_val += preprocess_block_size;
}
__syncthreads();
}
int const null_count = block_reduce(temp_storage.reduce_storage).Sum(num_nulls);
if (t == 0) {
pp->num_nulls = null_count;
pp->num_valids = pp->num_input_values - null_count;
}
__syncthreads();
end_value -= pp->num_nulls;
}
return {start_value, end_value};
}
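// Editorial example: for a non-bounds page with 100 input values of which 10 are null
// (definition level below max_def), the else-branch above sets num_nulls = 10 and
// num_valids = 90, and the function returns {0, 90}; the value range excludes the nulls.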
/**
* @brief Compute string size information for dictionary encoded strings.
*
* @param data Pointer to the start of the page data stream
* @param dict_base Pointer to the start of the dictionary
 * @param dict_bits The number of bits used in the dictionary bit packing
* @param dict_size Size of the dictionary in bytes
* @param data_size Size of the page data in bytes
* @param start_value Do not count values that occur before this index
* @param end_value Do not count values that occur after this index
*/
__device__ size_t totalDictEntriesSize(uint8_t const* data,
uint8_t const* dict_base,
int dict_bits,
int dict_size,
int data_size,
int start_value,
int end_value)
{
int const t = threadIdx.x;
uint8_t const* ptr = data;
uint8_t const* const end = data + data_size;
int const bytecnt = (dict_bits + 7) >> 3;
size_t l_str_len = 0; // partial sums across threads
int pos = 0; // current value index in the data stream
int t0 = 0; // thread 0 for this batch
int dict_run = 0;
int dict_val = 0;
while (pos < end_value && ptr <= end) {
if (dict_run <= 1) {
dict_run = (ptr < end) ? get_vlq32(ptr, end) : 0;
if (!(dict_run & 1)) {
// Repeated value
if (ptr + bytecnt <= end) {
int32_t run_val = ptr[0];
if (bytecnt > 1) {
run_val |= ptr[1] << 8;
if (bytecnt > 2) {
run_val |= ptr[2] << 16;
if (bytecnt > 3) { run_val |= ptr[3] << 24; }
}
}
dict_val = run_val & ((1 << dict_bits) - 1);
}
ptr += bytecnt;
}
}
int batch_len;
if (dict_run & 1) {
// Literal batch: must output a multiple of 8, except for the last batch
int batch_len_div8;
batch_len = max(min(preprocess_block_size, (int)(dict_run >> 1) * 8), 1);
batch_len_div8 = (batch_len + 7) >> 3;
dict_run -= batch_len_div8 * 2;
ptr += batch_len_div8 * dict_bits;
} else {
batch_len = dict_run >> 1;
dict_run = 0;
}
int const is_literal = dict_run & 1;
// calculate my thread id for this batch. way to round-robin the work.
int mytid = t - t0;
if (mytid < 0) mytid += preprocess_block_size;
// compute dictionary index.
if (is_literal) {
int dict_idx = 0;
if (mytid < batch_len) {
dict_idx = dict_val;
int32_t ofs = (mytid - ((batch_len + 7) & ~7)) * dict_bits;
const uint8_t* p = ptr + (ofs >> 3);
ofs &= 7;
if (p < end) {
uint32_t c = 8 - ofs;
dict_idx = (*p++) >> ofs;
if (c < dict_bits && p < end) {
dict_idx |= (*p++) << c;
c += 8;
if (c < dict_bits && p < end) {
dict_idx |= (*p++) << c;
c += 8;
if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; }
}
}
dict_idx &= (1 << dict_bits) - 1;
}
if (pos + mytid < end_value) {
uint32_t const dict_pos = (dict_bits > 0) ? dict_idx * sizeof(string_index_pair) : 0;
if (pos + mytid >= start_value && dict_pos < (uint32_t)dict_size) {
const auto* src = reinterpret_cast<const string_index_pair*>(dict_base + dict_pos);
l_str_len += src->second;
}
}
}
t0 += batch_len;
} else {
int const start_off =
(pos < start_value && pos + batch_len > start_value) ? start_value - pos : 0;
batch_len = min(batch_len, end_value - pos);
if (mytid == 0) {
uint32_t const dict_pos = (dict_bits > 0) ? dict_val * sizeof(string_index_pair) : 0;
if (pos + batch_len > start_value && dict_pos < (uint32_t)dict_size) {
const auto* src = reinterpret_cast<const string_index_pair*>(dict_base + dict_pos);
l_str_len += (batch_len - start_off) * src->second;
}
}
t0 += 1;
}
t0 = t0 % preprocess_block_size;
pos += batch_len;
}
__syncthreads();
using block_reduce = cub::BlockReduce<size_t, preprocess_block_size>;
__shared__ typename block_reduce::TempStorage reduce_storage;
size_t sum_l = block_reduce(reduce_storage).Sum(l_str_len);
return sum_l;
}
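// Worked example (editorial, not from the original source), assuming dict_bits == 3:
//   * an RLE header byte 0x08 (LSB 0) is a repeated run of 0x08 >> 1 = 4 values,
//     followed by bytecnt = 1 byte holding the repeated dictionary index;
//   * a header byte 0x03 (LSB 1) is a literal run of (0x03 >> 1) * 8 = 8 bit-packed
//     indices occupying (0x03 >> 1) * dict_bits = 3 bytes.
// Each in-range index is turned into a string_index_pair lookup whose length is added to
// the per-thread sum, and the block reduction above collapses those sums into one total.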
/**
* @brief Compute string size information for plain encoded strings.
*
* @param data Pointer to the start of the page data stream
* @param data_size Length of data
* @param start_value Do not count values that occur before this index
* @param end_value Do not count values that occur after this index
*/
__device__ size_t totalPlainEntriesSize(uint8_t const* data,
int data_size,
int start_value,
int end_value)
{
int const t = threadIdx.x;
int pos = 0;
size_t total_len = 0;
// This step is purely serial
if (!t) {
const uint8_t* cur = data;
int k = 0;
while (pos < end_value && k < data_size) {
int len;
if (k + 4 <= data_size) {
len = (cur[k]) | (cur[k + 1] << 8) | (cur[k + 2] << 16) | (cur[k + 3] << 24);
k += 4;
if (k + len > data_size) { len = 0; }
} else {
len = 0;
}
k += len;
if (pos >= start_value) { total_len += len; }
pos++;
}
}
return total_len;
}
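// Worked example (editorial): PLAIN-encoded byte-array data is a stream of
// <4-byte little-endian length><bytes>. For the values "ab" and "cde" the stream is
//   02 00 00 00 'a' 'b' 03 00 00 00 'c' 'd' 'e'
// and with start_value = 1, end_value = 2 only "cde" is counted, so the function
// returns 3.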
/**
* @brief Kernel for computing string page output size information.
*
* String columns need accurate data size information to preallocate memory in the column buffer to
* store the char data. This calls a kernel to calculate information needed by the string decoding
* kernel. On exit, the `str_bytes`, `num_nulls`, and `num_valids` fields of the PageInfo struct
* are updated. This call ignores non-string columns.
*
* @param pages All pages to be decoded
* @param chunks All chunks to be decoded
 * @param min_row Crop all rows below this row index
* @param num_rows Maximum number of rows to read
* @tparam level_t Type used to store decoded repetition and definition levels
*/
template <typename level_t>
__global__ void __launch_bounds__(preprocess_block_size) gpuComputePageStringSizes(
PageInfo* pages, device_span<ColumnChunkDesc const> chunks, size_t min_row, size_t num_rows)
{
__shared__ __align__(16) page_state_s state_g;
page_state_s* const s = &state_g;
int const page_idx = blockIdx.x;
int const t = threadIdx.x;
PageInfo* const pp = &pages[page_idx];
// reset str_bytes to 0 in case it's already been calculated
if (t == 0) { pp->str_bytes = 0; }
// whether or not we have repetition levels (lists)
bool const has_repetition = chunks[pp->chunk_idx].max_level[level_type::REPETITION] > 0;
// the required number of runs in shared memory we will need to provide the
// rle_stream object
constexpr int rle_run_buffer_size = rle_stream_required_run_buffer_size<preprocess_block_size>();
// the level stream decoders
__shared__ rle_run<level_t> def_runs[rle_run_buffer_size];
__shared__ rle_run<level_t> rep_runs[rle_run_buffer_size];
rle_stream<level_t, preprocess_block_size> decoders[level_type::NUM_LEVEL_TYPES] = {{def_runs},
{rep_runs}};
// setup page info
if (!setupLocalPageInfo(
s, pp, chunks, min_row, num_rows, mask_filter{KERNEL_MASK_STRING}, false)) {
return;
}
if (!t) {
s->page.num_nulls = 0;
s->page.num_valids = 0;
s->page.str_bytes = 0;
}
__syncthreads();
bool const is_bounds_pg = is_bounds_page(s, min_row, num_rows, has_repetition);
// if we're skipping this page anyway, no need to count it
if (!is_bounds_pg && !is_page_contained(s, min_row, num_rows)) { return; }
// find start/end value indices
auto const [start_value, end_value] =
page_bounds(s, min_row, num_rows, is_bounds_pg, has_repetition, decoders);
// need to save num_nulls and num_valids calculated in page_bounds in this page
if (t == 0) {
pp->num_nulls = s->page.num_nulls;
pp->num_valids = s->page.num_valids;
}
auto const& col = s->col;
size_t str_bytes = 0;
// short circuit for FIXED_LEN_BYTE_ARRAY
if ((col.data_type & 7) == FIXED_LEN_BYTE_ARRAY) {
str_bytes = pp->num_valids * s->dtype_len_in;
} else {
// now process string info in the range [start_value, end_value)
// set up for decoding strings...can be either plain or dictionary
uint8_t const* data = s->data_start;
uint8_t const* const end = s->data_end;
uint8_t const* dict_base = nullptr;
int dict_size = 0;
switch (pp->encoding) {
case Encoding::PLAIN_DICTIONARY:
case Encoding::RLE_DICTIONARY:
// RLE-packed dictionary indices, first byte indicates index length in bits
if (col.str_dict_index) {
// String dictionary: use index
dict_base = reinterpret_cast<const uint8_t*>(col.str_dict_index);
dict_size = col.page_info[0].num_input_values * sizeof(string_index_pair);
} else {
dict_base = col.page_info[0].page_data; // dictionary is always stored in the first page
dict_size = col.page_info[0].uncompressed_page_size;
}
// FIXME: need to return an error condition...this won't actually do anything
if (s->dict_bits > 32 || !dict_base) { CUDF_UNREACHABLE("invalid dictionary bit size"); }
str_bytes = totalDictEntriesSize(
data, dict_base, s->dict_bits, dict_size, (end - data), start_value, end_value);
break;
case Encoding::PLAIN:
dict_size = static_cast<int32_t>(end - data);
str_bytes = is_bounds_pg ? totalPlainEntriesSize(data, dict_size, start_value, end_value)
: dict_size - sizeof(int) * pp->num_valids;
break;
}
}
if (t == 0) {
// TODO check for overflow
pp->str_bytes = str_bytes;
}
}
/**
* @brief Kernel for computing the string column data stored in the pages
*
* This function will write the page data and the page data's validity to the
* output specified in the page's column chunk.
*
* This version uses a single warp to do the string copies.
*
* @param pages List of pages
* @param chunks List of column chunks
* @param min_row Row index to start reading at
* @param num_rows Maximum number of rows to read
* @tparam level_t Type used to store decoded repetition and definition levels
*/
template <typename level_t>
__global__ void __launch_bounds__(decode_block_size) gpuDecodeStringPageData(
PageInfo* pages, device_span<ColumnChunkDesc const> chunks, size_t min_row, size_t num_rows)
{
__shared__ __align__(16) page_state_s state_g;
__shared__ __align__(4) size_type last_offset;
__shared__ __align__(16)
page_state_buffers_s<rolling_buf_size, rolling_buf_size, rolling_buf_size>
state_buffers;
page_state_s* const s = &state_g;
auto* const sb = &state_buffers;
int const page_idx = blockIdx.x;
int const t = threadIdx.x;
[[maybe_unused]] null_count_back_copier _{s, t};
if (!setupLocalPageInfo(
s, &pages[page_idx], chunks, min_row, num_rows, mask_filter{KERNEL_MASK_STRING}, true)) {
return;
}
bool const has_repetition = s->col.max_level[level_type::REPETITION] > 0;
// offsets are local to the page
if (t == 0) { last_offset = 0; }
__syncthreads();
int const out_thread0 = s->dict_base && s->dict_bits == 0 ? 32 : 64;
int const leaf_level_index = s->col.max_nesting_depth - 1;
PageNestingDecodeInfo* const nesting_info_base = s->nesting_info;
__shared__ level_t rep[rolling_buf_size]; // circular buffer of repetition level values
__shared__ level_t def[rolling_buf_size]; // circular buffer of definition level values
// skipped_leaf_values will always be 0 for flat hierarchies.
uint32_t skipped_leaf_values = s->page.skipped_leaf_values;
while (!s->error && (s->input_value_count < s->num_input_values || s->src_pos < s->nz_count)) {
int target_pos;
int src_pos = s->src_pos;
if (t < out_thread0) {
target_pos = min(src_pos + 2 * (decode_block_size - out_thread0),
s->nz_count + (decode_block_size - out_thread0));
} else {
target_pos = min(s->nz_count, src_pos + decode_block_size - out_thread0);
if (out_thread0 > 32) { target_pos = min(target_pos, s->dict_pos); }
}
__syncthreads();
if (t < 32) {
// decode repetition and definition levels.
// - update validity vectors
// - updates offsets (for nested columns)
// - produces non-NULL value indices in s->nz_idx for subsequent decoding
gpuDecodeLevels<rolling_buf_size, level_t>(s, sb, target_pos, rep, def, t);
} else if (t < out_thread0) {
// skipped_leaf_values will always be 0 for flat hierarchies.
uint32_t src_target_pos = target_pos + skipped_leaf_values;
// WARP1: Decode dictionary indices, booleans or string positions
if (s->dict_base) {
src_target_pos = gpuDecodeDictionaryIndices<false>(s, sb, src_target_pos, t & 0x1f).first;
} else {
gpuInitStringDescriptors<false>(s, sb, src_target_pos, t & 0x1f);
}
if (t == 32) { *(volatile int32_t*)&s->dict_pos = src_target_pos; }
} else {
int const me = t - out_thread0;
// WARP1..WARP3: Decode values
src_pos += t - out_thread0;
// the position in the output column/buffer
int dst_pos = sb->nz_idx[rolling_index<rolling_buf_size>(src_pos)];
// for the flat hierarchy case we will be reading from the beginning of the value stream,
// regardless of the value of first_row. so adjust our destination offset accordingly.
// example:
// - user has passed skip_rows = 2, so our first_row to output is 2
// - the row values we get from nz_idx will be
// 0, 1, 2, 3, 4 ....
// - by shifting these values by first_row, the sequence becomes
// -1, -2, 0, 1, 2 ...
// - so we will end up ignoring the first two input rows, and input rows 2..n will
// get written to the output starting at position 0.
//
if (!has_repetition) { dst_pos -= s->first_row; }
// need to do this before we branch on src_pos/dst_pos so we don't deadlock
// choose a character parallel string copy when the average string is longer than a warp
using cudf::detail::warp_size;
auto const use_char_ll =
s->page.num_valids > 0 && (s->page.str_bytes / s->page.num_valids) >= warp_size;
if (me < warp_size) {
for (int i = 0; i < decode_block_size - out_thread0; i += warp_size) {
dst_pos = sb->nz_idx[rolling_index<rolling_buf_size>(src_pos + i)];
if (!has_repetition) { dst_pos -= s->first_row; }
auto [ptr, len] = src_pos + i < target_pos && dst_pos >= 0
? gpuGetStringData(s, sb, src_pos + skipped_leaf_values + i)
: cuda::std::pair<char const*, size_t>{nullptr, 0};
__shared__ cub::WarpScan<size_type>::TempStorage temp_storage;
size_type offset;
cub::WarpScan<size_type>(temp_storage).ExclusiveSum(len, offset);
offset += last_offset;
if (use_char_ll) {
__shared__ __align__(8) uint8_t const* pointers[warp_size];
__shared__ __align__(4) size_type offsets[warp_size];
__shared__ __align__(4) int dsts[warp_size];
__shared__ __align__(4) int lengths[warp_size];
offsets[me] = offset;
pointers[me] = reinterpret_cast<uint8_t const*>(ptr);
dsts[me] = dst_pos;
lengths[me] = len;
__syncwarp();
for (int ss = 0; ss < warp_size && ss + i + s->src_pos < target_pos; ss++) {
if (dsts[ss] >= 0) {
auto offptr =
reinterpret_cast<int32_t*>(nesting_info_base[leaf_level_index].data_out) +
dsts[ss];
*offptr = lengths[ss];
auto str_ptr = nesting_info_base[leaf_level_index].string_out + offsets[ss];
ll_strcpy(str_ptr, pointers[ss], lengths[ss], me);
}
}
} else {
if (src_pos + i < target_pos && dst_pos >= 0) {
auto offptr =
reinterpret_cast<int32_t*>(nesting_info_base[leaf_level_index].data_out) + dst_pos;
*offptr = len;
auto str_ptr = nesting_info_base[leaf_level_index].string_out + offset;
memcpy(str_ptr, ptr, len);
}
__syncwarp();
}
// last thread in warp updates last_offset
if (me == warp_size - 1) { last_offset = offset + len; }
__syncwarp();
}
}
if (t == out_thread0) { *(volatile int32_t*)&s->src_pos = target_pos; }
}
__syncthreads();
}
// now turn array of lengths into offsets
int value_count = nesting_info_base[leaf_level_index].value_count;
// if no repetition we haven't calculated start/end bounds and instead just skipped
// values until we reach first_row. account for that here.
if (!has_repetition) { value_count -= s->first_row; }
auto const offptr = reinterpret_cast<size_type*>(nesting_info_base[leaf_level_index].data_out);
block_excl_sum<decode_block_size>(offptr, value_count, s->page.str_offset);
}
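// Editorial note: block_excl_sum converts the per-value lengths written above into string
// offsets in place; e.g. lengths {2, 0, 3} with page str_offset 10 would become offsets
// {10, 12, 12} under an exclusive prefix sum (assumed behaviour; see page_string_utils.cuh).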
} // anonymous namespace
/**
* @copydoc cudf::io::parquet::gpu::ComputePageStringSizes
*/
void ComputePageStringSizes(cudf::detail::hostdevice_vector<PageInfo>& pages,
cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks,
size_t min_row,
size_t num_rows,
int level_type_size,
rmm::cuda_stream_view stream)
{
dim3 dim_block(preprocess_block_size, 1);
dim3 dim_grid(pages.size(), 1); // 1 threadblock per page
if (level_type_size == 1) {
gpuComputePageStringSizes<uint8_t>
<<<dim_grid, dim_block, 0, stream.value()>>>(pages.device_ptr(), chunks, min_row, num_rows);
} else {
gpuComputePageStringSizes<uint16_t>
<<<dim_grid, dim_block, 0, stream.value()>>>(pages.device_ptr(), chunks, min_row, num_rows);
}
}
/**
* @copydoc cudf::io::parquet::gpu::DecodeStringPageData
*/
void __host__ DecodeStringPageData(cudf::detail::hostdevice_vector<PageInfo>& pages,
cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks,
size_t num_rows,
size_t min_row,
int level_type_size,
rmm::cuda_stream_view stream)
{
CUDF_EXPECTS(pages.size() > 0, "There is no page to decode");
dim3 dim_block(decode_block_size, 1);
dim3 dim_grid(pages.size(), 1); // 1 threadblock per page
if (level_type_size == 1) {
gpuDecodeStringPageData<uint8_t>
<<<dim_grid, dim_block, 0, stream.value()>>>(pages.device_ptr(), chunks, min_row, num_rows);
} else {
gpuDecodeStringPageData<uint16_t>
<<<dim_grid, dim_block, 0, stream.value()>>>(pages.device_ptr(), chunks, min_row, num_rows);
}
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
|
04d08458153e72dc16021214b77f204962629c91.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2013 Ben Barsdell
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
TODO: [Done]Add overlap frames to properly allow pixel delays
[Done]Add support for per-source (and pixel) delays, just like weights
[Done]Add interface for passing in and returning to device memory
Test code
CPU implementation
Benchmarks
[Done]Getter functions
[Done]Documentation
Further hardening
[Done]Python wrapper
C++ wrapper
*/
#include "light_curve_extractor.h"
#include <cstdio>
#include <cassert>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/remove.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
// Only for testing
#include "stopwatch.hpp"
typedef unsigned char uchar;
texture<uchar, hipTextureType3D, hipReadModeNormalizedFloat> t_data_8bit;
texture<ushort, hipTextureType3D, hipReadModeNormalizedFloat> t_data_16bit;
texture<float, hipTextureType3D, hipReadModeElementType> t_data_32bit;
// d_source_pixels contains the Cartesian coords (x,y,z) and weight (w) of
// each pixel in each cell.
template<int BLOCK_SIZE, int BITDEPTH>
__global__
void extract_light_curves_kernel(uint nframes,
uint nsources,
const uint* __restrict__ c_source_offsets,
const uint* __restrict__ c_source_npixels,
const float4* d_source_pixels,
float* d_light_curves) {
uint frame0 = threadIdx.x + blockIdx.x*BLOCK_SIZE;
uint source = blockIdx.y;
// Load constant values
uint source_offset = c_source_offsets[source];
uint source_npixels = c_source_npixels[source];
// Manually-managed shared memory cache for source pixel info
__shared__ float4 s_source_pixels[BLOCK_SIZE];
// Loop over whole grids of threads
// Note: Must pad to multiple of block size due to use of smem/syncthreads
uint nframes_padded = ((nframes - 1) / BLOCK_SIZE + 1) * BLOCK_SIZE;
for( uint frame=frame0; frame<nframes_padded; frame+=BLOCK_SIZE*gridDim.x ) {
// Sequentially sum up pixels contributing to this source
float sum = 0.f;
for( uint pb=0; pb<source_npixels; pb+=BLOCK_SIZE ) {
// Take care of the last block potentially being smaller
uint block_size = min(BLOCK_SIZE, source_npixels-pb);
// Cache a line of pixel coords/weight for this source
uint p = pb + threadIdx.x;
if( threadIdx.x < block_size ) {
s_source_pixels[threadIdx.x] = d_source_pixels[source_offset + p];
}
__syncthreads();
// Sum pixels in the block
for( uint pi=0; pi<block_size; ++pi ) {
float4 pxl = s_source_pixels[pi];
float val;
switch( BITDEPTH ) {
case 8:
val = tex3D(t_data_8bit,
pxl.x+.5f, pxl.y+.5f, pxl.z+.5f + frame);
val *= (1<<8)-1; // Un-normalise back to integer scale
break;
case 16:
val = tex3D(t_data_16bit,
pxl.x+.5f, pxl.y+.5f, pxl.z+.5f + frame);
val *= (1<<16)-1; // Un-normalise back to integer scale
break;
case 32:
val = tex3D(t_data_32bit,
pxl.x+.5f, pxl.y+.5f, pxl.z+.5f + frame);
break;
}
sum += pxl.w * val;
}
// All threads must be done reading this tile of shared memory before
// the next pb iteration overwrites it
__syncthreads();
}
// Write the summed frame back to global mem
// Note: It's now safe to crop excess threads
if( frame < nframes ) {
//d_light_curves[frame + nframes*source] = sum;
// Note: Frame-major allows stitching frames together easily, but
// here means non-coalesced memory writes. It doesn't
// appear to make a significant difference to the run time.
d_light_curves[source + nsources*frame] = sum;
}
}
}
#if defined(LCE_DEBUG) && LCE_DEBUG
#define throw_error(err) { \
fprintf(stderr, "LCE error (%s:%i): %s\n", \
__FILE__, __LINE__, \
lce_get_error_string(err)); \
return err; \
}
#define throw_cuda_error(cuda_err, err) { \
fprintf(stderr, "LCE GPU error (%s:%i): %s\n", \
__FILE__, __LINE__, \
hipGetErrorString(cuda_err)); \
return err; \
}
#else
#define throw_error(err) return err
#define throw_cuda_error(cuda_err, err) return err
#endif
// Internal (tuning) parameters
enum {
LCE_NBUF = 2,
LCE_KERNEL_BLOCK_SIZE = 128,
LCE_DEFAULT_NFRAMES_DEVICE = 64
};
struct lce_plan_t {
lce_size width;
lce_size height;
lce_size bitdepth;
lce_size nframes_device;
int device_idx;
lce_size overlap;
thrust::device_vector<float> d_pixel_weights;
thrust::device_vector<float> d_pixel_delays;
hipStream_t streams[LCE_NBUF];
hipArray* a_data[LCE_NBUF];
hipEvent_t finished_event;
lce_size nsources;
thrust::device_vector<uint> d_source_offsets;
thrust::device_vector<uint> d_source_npixels;
thrust::device_vector<float4> d_source_pixels;
thrust::device_vector<float> d_light_curves;
};
bool multiple_bits_set(unsigned int flags) {
// True only when more than one bit of flags is set (used to reject conflicting flags)
return (flags & (flags - 1)) != 0;
}
lce_error allocate_data_arrays(lce_plan plan) {
hipError_t cuda_error;
for( int buf=0; buf<LCE_NBUF; ++buf ) {
// Free existing allocation if present
if( plan->a_data[buf] ) {
cuda_error = hipFreeArray(plan->a_data[buf]);
if( cuda_error != hipSuccess ) {
throw_cuda_error(cuda_error, LCE_GPU_ERROR);
}
}
// Allocate a 3D CUDA array for later binding to a texture
hipChannelFormatKind formatkind;
switch( plan->bitdepth ) {
case 8:
case 16: formatkind = hipChannelFormatKindUnsigned; break;
case 32: formatkind = hipChannelFormatKindFloat; break;
default: throw_error(LCE_INVALID_BITDEPTH);
}
hipChannelFormatDesc channel_desc =
hipCreateChannelDesc(plan->bitdepth, 0, 0, 0,
formatkind);
hipExtent extent = make_hipExtent(plan->width, plan->height,
plan->nframes_device
+ plan->overlap);
unsigned int allocflags = 0;
cuda_error = hipMalloc3DArray(&plan->a_data[buf],
&channel_desc,
extent,
allocflags);
if( cuda_error != hipSuccess ) {
throw_cuda_error(cuda_error, LCE_MEM_ALLOC_FAILED);
}
}
return LCE_NO_ERROR;
}
lce_size lce_get_width(const lce_plan plan) { return plan->width; }
lce_size lce_get_height(const lce_plan plan) { return plan->height; }
lce_size lce_get_bitdepth(const lce_plan plan) { return plan->bitdepth; }
lce_size lce_get_nframes_device(const lce_plan plan) { return plan->nframes_device; }
int lce_get_device_idx(const lce_plan plan) { return plan->device_idx; }
lce_size lce_get_nsources(const lce_plan plan) { return plan->nsources; }
lce_size lce_get_max_delay(const lce_plan plan) { return plan->overlap; }
lce_error lce_get_pixel_weights(const lce_plan plan,
float* pixel_weights) {
thrust::copy(plan->d_pixel_weights.begin(),
plan->d_pixel_weights.end(),
pixel_weights);
return LCE_NO_ERROR;
}
lce_error lce_get_pixel_delays(const lce_plan plan,
float* pixel_delays) {
thrust::copy(plan->d_pixel_delays.begin(),
plan->d_pixel_delays.end(),
pixel_delays);
return LCE_NO_ERROR;
}
lce_error lce_set_nframes_device(lce_plan plan,
lce_size nframes_device) {
if( nframes_device == 0 ) {
throw_error(LCE_INVALID_NFRAMES);
}
plan->nframes_device = nframes_device;
lce_error err = allocate_data_arrays(plan);
if( err != LCE_NO_ERROR ) {
throw_error(err);
}
return LCE_NO_ERROR;
}
lce_error lce_create(lce_plan* plan,
lce_size width,
lce_size height,
lce_size bitdepth,
int device_idx,
const float* pixel_weights,
const float* pixel_delays) {
/*
printf("Width = %u\n", width);
printf("Height = %u\n", height);
printf("Bitdepth = %u\n", bitdepth);
printf("Device = %i\n", device_idx);
printf("Weights = %p\n", pixel_weights);
printf("Delays = %p\n", pixel_delays);
*/
if( !(bitdepth == 8 ||
bitdepth == 16 ||
bitdepth == 32) ) {
throw_error(LCE_INVALID_BITDEPTH);
}
lce_plan newplan = new lce_plan_t;
if( !newplan ) {
throw_error(LCE_MEM_ALLOC_FAILED);
}
// TODO: Do careful clean-up of dynamic allocations in here when
// something fails mid-way through the function.
newplan->width = width;
newplan->height = height;
newplan->bitdepth = bitdepth;
newplan->nframes_device = LCE_DEFAULT_NFRAMES_DEVICE;
newplan->device_idx = device_idx;
newplan->nsources = 0;
hipError_t cuda_error;
// Create a CUDA event to record when computation has finished
cuda_error = hipEventCreateWithFlags(&newplan->finished_event,
hipEventDisableTiming |
hipEventBlockingSync);
if( cuda_error != hipSuccess ) {
throw_cuda_error(cuda_error, LCE_GPU_ERROR);
}
cuda_error = hipSetDevice(device_idx);
if( cuda_error != hipSuccess ) {
if( cuda_error == hipErrorInvalidDevice ) {
throw_cuda_error(cuda_error, LCE_INVALID_DEVICE);
}
else {
throw_cuda_error(cuda_error, LCE_GPU_ERROR);
}
}
// TODO: Check the assign and resize calls for exceptions
if( pixel_weights ) {
newplan->d_pixel_weights.assign(pixel_weights,
pixel_weights + width*height);
}
if( pixel_delays ) {
newplan->d_pixel_delays.assign(pixel_delays,
pixel_delays + width*height);
float max_delay = *thrust::max_element(newplan->d_pixel_delays.begin(),
newplan->d_pixel_delays.end());
newplan->overlap = ceil(max_delay);
}
else {
newplan->overlap = 0;
}
for( int buf=0; buf<LCE_NBUF; ++buf ) {
hipStreamCreate(&newplan->streams[buf]);
newplan->a_data[buf] = 0;
}
lce_error err = allocate_data_arrays(newplan);
if( err != LCE_NO_ERROR ) {
throw_error(err);
}
*plan = newplan;
// Set textures to return 0 when out-of-bounds, and to use
// linear interpolation.
t_data_8bit.addressMode[0] = hipAddressModeBorder;
t_data_8bit.addressMode[1] = hipAddressModeBorder;
t_data_8bit.addressMode[2] = hipAddressModeBorder;
t_data_8bit.filterMode = hipFilterModeLinear;
t_data_8bit.normalized = false;
t_data_16bit.addressMode[0] = hipAddressModeBorder;
t_data_16bit.addressMode[1] = hipAddressModeBorder;
t_data_16bit.addressMode[2] = hipAddressModeBorder;
t_data_16bit.filterMode = hipFilterModeLinear;
t_data_16bit.normalized = false;
t_data_32bit.addressMode[0] = hipAddressModeBorder;
t_data_32bit.addressMode[1] = hipAddressModeBorder;
t_data_32bit.addressMode[2] = hipAddressModeBorder;
t_data_32bit.filterMode = hipFilterModeLinear;
t_data_32bit.normalized = false;
return LCE_NO_ERROR;
}
void lce_destroy(lce_plan plan) {
if( !plan ) {
return;
}
hipSetDevice(plan->device_idx);
hipEventDestroy(plan->finished_event);
for( int buf=0; buf<LCE_NBUF; ++buf ) {
hipStreamDestroy(plan->streams[buf]);
hipFreeArray(plan->a_data[buf]);
}
delete plan;
}
template<typename T>
struct abs_less_equal_val : public thrust::unary_function<T,bool> {
T val;
abs_less_equal_val(T val_) : val(val_) {}
inline __host__ __device__
bool operator()(T x) const {
return fabs(x) <= val;
}
};
template<typename T>
struct multiply_by : public thrust::unary_function<T,T> {
T val;
multiply_by(T val_) : val(val_) {}
inline __host__ __device__
T operator()(T x) const {
return x * val;
}
};
struct get_spatial_sort_index : public thrust::unary_function<uint, uint> {
uint imsize;
uint width;
get_spatial_sort_index(uint imsize_, uint width_)
: imsize(imsize_), width(width_) {}
// This code was copied from http://graphics.stanford.edu/~seander/bithacks.html
inline __host__ __device__
uint get_zindex(uint x, uint y) const {
const uint B[] = {0x55555555, 0x33333333, 0x0F0F0F0F, 0x00FF00FF};
const uint S[] = {1, 2, 4, 8};
// Interleave lower 16 bits of x and y, so the bits of x
// are in the even positions and bits from y in the odd;
// x and y must initially be less than 65536.
x = (x | (x << S[3])) & B[3];
x = (x | (x << S[2])) & B[2];
x = (x | (x << S[1])) & B[1];
x = (x | (x << S[0])) & B[0];
y = (y | (y << S[3])) & B[3];
y = (y | (y << S[2])) & B[2];
y = (y | (y << S[1])) & B[1];
y = (y | (y << S[0])) & B[0];
uint z = x | (y << 1);
return z;
}
inline __host__ __device__
uint operator()(uint idx) const {
uint src_idx = idx / imsize;
uint pxl_idx = idx % imsize;
uint x = pxl_idx % width;
uint y = pxl_idx / width;
uint zindex = get_zindex(x, y);
uint sort_index = zindex + imsize*src_idx;
return sort_index;
}
};
struct gen_source_pixel_table
: public thrust::binary_function<uint, void, float4> {
uint imsize;
uint width;
const float* pixel_delays;
const float* pixel_weights;
gen_source_pixel_table(uint imsize_, uint width_,
const float* pixel_delays_,
const float* pixel_weights_)
: imsize(imsize_), width(width_),
pixel_delays(pixel_delays_),
pixel_weights(pixel_weights_) {}
template<typename Tuple>
inline __host__ __device__
float4 operator()(uint idx, Tuple wd) const {
float weight = thrust::get<0>(wd);
float delay = thrust::get<1>(wd);
uint pxl_idx = idx % imsize;
float4 pxl;
pxl.x = pxl_idx % width;
pxl.y = pxl_idx / width;
pxl.z = delay + pixel_delays[pxl_idx];
pxl.w = weight * pixel_weights[pxl_idx];
return pxl;
}
};
template<typename T>
struct z_less_than : public thrust::binary_function<T,T,bool> {
inline __host__ __device__
bool operator()(T a, T b) const {
return a.z < b.z;
}
};
lce_error lce_set_source_weights_by_image(lce_plan plan,
lce_size nsources,
const float* source_weights,
float zero_thresh,
const float* source_delays) {
if( !plan ) {
throw_error(LCE_INVALID_PLAN);
}
hipSetDevice(plan->device_idx);
plan->nsources = nsources;
// TODO: It's possible that some applications will actually have few/no
// zeros in the source weights. In these cases, we could actually
// store the dense source_weights matrices and use sgemm instead
// of the custom 'sparse weights' kernel.
// Would have to guess or autotune the tipping point between the
// efficiency of the two algorithms.
using thrust::make_counting_iterator;
using thrust::make_transform_iterator;
using thrust::make_zip_iterator;
using thrust::make_tuple;
// Copy weights to device
size_t imsize = plan->width * plan->height;
thrust::device_vector<float> d_weights(source_weights,
source_weights + nsources*imsize);
// Copy delays to device
thrust::device_vector<float> d_delays;
if( !source_delays ) {
d_delays.resize(nsources*imsize, 0.f);
}
else {
d_delays.assign(source_delays,
source_delays + nsources*imsize);
}
// Compact a list of indices by removing zero weights
thrust::device_vector<uint> d_inds(nsources*imsize);
thrust::device_vector<uint>::iterator end_iter;
end_iter = thrust::remove_copy_if(make_counting_iterator<uint>(0),
make_counting_iterator<uint>(nsources*imsize),
d_weights.begin(),
d_inds.begin(),
abs_less_equal_val<float>(zero_thresh));
d_inds.resize(end_iter - d_inds.begin());
// Find each source's offset into the compacted list of indices
plan->d_source_offsets.resize(nsources);
thrust::lower_bound(d_inds.begin(), d_inds.end(),
make_transform_iterator(make_counting_iterator<uint>(0),
multiply_by<uint>(imsize)),
make_transform_iterator(make_counting_iterator<uint>(nsources),
multiply_by<uint>(imsize)),
plan->d_source_offsets.begin());
// Difference adjacent offsets to find the number of pixels in each source
plan->d_source_npixels.resize(nsources);
plan->d_source_offsets.push_back(d_inds.size());
thrust::transform(plan->d_source_offsets.begin()+1,
plan->d_source_offsets.end(),
plan->d_source_offsets.begin(),
plan->d_source_npixels.begin(),
thrust::minus<uint>());
// Spatially sort inds (e.g., by Z order)
// TODO: This has yet to prove its worth
thrust::device_vector<uint> d_spatial_sort_keys(d_inds.size());
thrust::transform(d_inds.begin(), d_inds.end(),
d_spatial_sort_keys.begin(),
get_spatial_sort_index(imsize, plan->width));
thrust::sort_by_key(d_spatial_sort_keys.begin(),
d_spatial_sort_keys.end(),
d_inds.begin());
if( plan->d_pixel_delays.empty() ) {
plan->d_pixel_delays.resize(imsize, 0.f);
}
if( plan->d_pixel_weights.empty() ) {
plan->d_pixel_weights.resize(imsize, 1.f);
}
// Generate source pixel lookup values as:
// float4(col, row, delay, weight*pixel_weight)
const float* d_pixel_delays_ptr = thrust::raw_pointer_cast(&plan->d_pixel_delays[0]);
const float* d_pixel_weights_ptr = thrust::raw_pointer_cast(&plan->d_pixel_weights[0]);
plan->d_source_pixels.resize(d_inds.size());
thrust::transform(d_inds.begin(), d_inds.end(),
make_permutation_iterator(make_zip_iterator(make_tuple(d_weights.begin(),
d_delays.begin())),
d_inds.begin()),
plan->d_source_pixels.begin(),
gen_source_pixel_table(imsize, plan->width,
d_pixel_delays_ptr,
d_pixel_weights_ptr));
// Adjust required overlap based on actual max delay
float4 max_val = *thrust::max_element(plan->d_source_pixels.begin(),
plan->d_source_pixels.end(),
z_less_than<float4>());
float max_delay = max_val.z;
plan->overlap = ceil(max_delay);
// Check for illegal negative delays
float4 min_val = *thrust::min_element(plan->d_source_pixels.begin(),
plan->d_source_pixels.end(),
z_less_than<float4>());
float min_delay = min_val.z;
if( min_delay < 0.f ) {
throw_error(LCE_INVALID_DELAY);
}
// Allocate output memory space
plan->d_light_curves.resize(nsources*plan->nframes_device);
return LCE_NO_ERROR;
}
/*
// TODO: Implement this if there is a motivating use-case
int lce_set_source_weights_by_pixel(lce_plan plan,
lce_size nsources,
const lce_size* source_npixels,
const int** source_coords,
const float** source_weights) {
}
*/
lce_error copy_input(const lce_plan plan,
const void* data,
int buf,
unsigned int flags) {
assert(plan != 0);
hipError_t error;
hipStream_t stream = plan->streams[buf];
error = hipGetLastError();
if( error != hipSuccess ) {
throw_cuda_error(error, LCE_MEM_COPY_FAILED);
}
hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_hipPitchedPtr((void*)data,
plan->width*plan->bitdepth/8,
plan->width,
plan->height);
copyParams.dstArray = plan->a_data[buf];
copyParams.extent = make_hipExtent(plan->width,
plan->height,
plan->nframes_device + plan->overlap);
if( !(flags & LCE_INPUT_MASK) ) {
flags |= LCE_DEFAULT_INPUT;
}
else if( multiple_bits_set(flags & LCE_INPUT_MASK) ) {
throw_error(LCE_INVALID_FLAGS);
}
switch( flags ) {
case LCE_HOST_INPUT: copyParams.kind = hipMemcpyHostToDevice; break;
case LCE_DEVICE_INPUT: copyParams.kind = hipMemcpyDeviceToDevice; break;
default: copyParams.kind = hipMemcpyHostToDevice;
}
error = hipMemcpy3DAsync(&copyParams, stream);
if( error != hipSuccess ) {
throw_cuda_error(error, LCE_MEM_COPY_FAILED);
}
return LCE_NO_ERROR;
}
lce_error compute(const lce_plan plan,
float* light_curves,
int buf,
unsigned int flags) {
assert(plan != 0);
enum { BLOCK_SIZE = LCE_KERNEL_BLOCK_SIZE };
hipStream_t stream = plan->streams[buf];
hipError_t cuda_error;
hipArray* a_data = plan->a_data[buf];
const lce_size* d_source_offsets_ptr = thrust::raw_pointer_cast(&plan->d_source_offsets[0]);
const lce_size* d_source_npixels_ptr = thrust::raw_pointer_cast(&plan->d_source_npixels[0]);
const float4* d_source_pixels_ptr = thrust::raw_pointer_cast(&plan->d_source_pixels[0]);
float* d_light_curves_ptr = thrust::raw_pointer_cast(&plan->d_light_curves[0]);
//Stopwatch timer;
//timer.start();
// TODO: Does this slow things down significantly? Could it be done during init, with separate textures for each buf?
switch( plan->bitdepth ) {
case 8: cuda_error = hipBindTextureToArray(t_data_8bit, a_data); break;
case 16: cuda_error = hipBindTextureToArray(t_data_16bit, a_data); break;
case 32: cuda_error = hipBindTextureToArray(t_data_32bit, a_data); break;
default: throw_error(LCE_INVALID_BITDEPTH);
}
if( cuda_error != hipSuccess ) {
throw_cuda_error(cuda_error, LCE_GPU_ERROR);
}
//timer.stop();
//printf("Texture bind time = %f\n", timer.getTime());
// Compute thread decomposition
size_t nframe_blocks = (plan->nframes_device - 1) / BLOCK_SIZE + 1;
dim3 block(BLOCK_SIZE);
dim3 grid(nframe_blocks,
plan->nsources);
// Dynamically dispatch on bitdepth and execute GPU kernel
switch( plan->bitdepth ) {
case 8: hipLaunchKernelGGL(( extract_light_curves_kernel<BLOCK_SIZE, 8>), dim3(grid), dim3(block), 0, stream,
plan->nframes_device,plan->nsources,d_source_offsets_ptr,
d_source_npixels_ptr,d_source_pixels_ptr,d_light_curves_ptr); break;
case 16:hipLaunchKernelGGL(( extract_light_curves_kernel<BLOCK_SIZE, 16>), dim3(grid), dim3(block), 0, stream,
plan->nframes_device,plan->nsources,d_source_offsets_ptr,
d_source_npixels_ptr,d_source_pixels_ptr,d_light_curves_ptr); break;
case 32:hipLaunchKernelGGL(( extract_light_curves_kernel<BLOCK_SIZE, 32>), dim3(grid), dim3(block), 0, stream,
plan->nframes_device,plan->nsources,d_source_offsets_ptr,
d_source_npixels_ptr,d_source_pixels_ptr,d_light_curves_ptr); break;
default: throw_error(LCE_INVALID_BITDEPTH);
}
#if defined(LCE_DEBUG) && LCE_DEBUG
// Note: Error-checking the kernel disables asynchronous execution
hipStreamSynchronize(stream);
cuda_error = hipGetLastError();
if( cuda_error != hipSuccess ) {
throw_cuda_error(cuda_error, LCE_GPU_ERROR);
}
#endif
// Copy results back to host
if( !(flags & LCE_OUTPUT_MASK) ) {
flags = LCE_DEFAULT_OUTPUT;
}
else if( multiple_bits_set(flags & LCE_OUTPUT_MASK) ) {
throw_error(LCE_INVALID_FLAGS);
}
hipMemcpyKind memcpykind;
switch( flags ) {
case LCE_HOST_OUTPUT: memcpykind = hipMemcpyDeviceToHost; break;
case LCE_DEVICE_OUTPUT: memcpykind = hipMemcpyDeviceToDevice; break;
default: memcpykind = hipMemcpyDeviceToHost;
}
size_t light_curve_nbytes = (plan->nframes_device * plan->nsources
* sizeof(float));
cuda_error = hipMemcpyAsync((void*)light_curves, (void*)d_light_curves_ptr,
light_curve_nbytes, memcpykind,
stream);
if( cuda_error != hipSuccess ) {
throw_cuda_error(cuda_error, LCE_MEM_COPY_FAILED);
}
return LCE_NO_ERROR;
}
lce_error lce_execute(const lce_plan plan,
lce_size nframes,
const void* data,
float* light_curves,
unsigned int flags) {
if( !plan ) {
throw_error(LCE_INVALID_PLAN);
}
lce_size nframes_computed = nframes - plan->overlap;
if( nframes_computed % plan->nframes_device != 0 ) {
throw_error(LCE_INVALID_NFRAMES);
}
// TODO: Remove this when done benchmarking (or integrate properly)
Stopwatch timer;
timer.start();
hipSetDevice(plan->device_idx);
size_t npipe = nframes_computed / plan->nframes_device;
size_t frame_nbytes = plan->width*plan->height*plan->bitdepth/8;
size_t in_stride = plan->nframes_device * frame_nbytes;
size_t out_stride = plan->nframes_device * plan->nsources;
size_t pipe = 0;
copy_input(plan, (char*)data + pipe*in_stride,
pipe % LCE_NBUF, flags);
while( pipe < npipe-1 ) {
copy_input(plan, (char*)data + (pipe+1)*in_stride,
(pipe+1) % LCE_NBUF, flags);
compute(plan, light_curves + pipe*out_stride,
pipe % LCE_NBUF, flags);
++pipe;
}
compute(plan, light_curves + pipe*out_stride,
pipe % LCE_NBUF, flags);
// Record an event so we can check when computation is finished
hipEventRecord(plan->finished_event, 0);
if( !(flags & LCE_SYNC_MASK) ) {
flags |= LCE_DEFAULT_SYNC;
}
else if( multiple_bits_set(flags & LCE_SYNC_MASK) ) {
throw_error(LCE_INVALID_FLAGS);
}
if( flags & LCE_SYNC ) {
lce_error err = lce_synchronize(plan);
if( err != LCE_NO_ERROR ) {
throw_error(err);
}
}
// TODO: Remove this when done benchmarking (or integrate properly)
timer.stop();
printf("lce_execute time = %f s\n", timer.getTime());
printf(" = %f fps\n", (nframes-plan->overlap)/timer.getTime());
return LCE_NO_ERROR;
}
lce_error lce_query_status(const lce_plan plan) {
hipSetDevice(plan->device_idx);
hipError_t cuda_error = hipEventQuery(plan->finished_event);
if( cuda_error == hipSuccess ) {
return LCE_NO_ERROR;
}
else if( cuda_error == hipErrorNotReady ) {
return LCE_NOT_READY;
}
else {
throw_cuda_error(cuda_error, LCE_GPU_ERROR);
}
}
lce_error lce_synchronize(const lce_plan plan) {
hipSetDevice(plan->device_idx);
hipEventSynchronize(plan->finished_event);
hipError_t error = hipGetLastError();
if( error != hipSuccess ) {
throw_cuda_error(error, LCE_GPU_ERROR);
}
return LCE_NO_ERROR;
}
const char* lce_get_error_string(lce_error error) {
switch( error ) {
case LCE_NO_ERROR:
return "No error";
case LCE_INVALID_PLAN:
return "Invalid plan";
case LCE_INVALID_BITDEPTH:
return "Invalid bitdepth";
case LCE_INVALID_NFRAMES:
return "Invalid nframes";
case LCE_INVALID_DEVICE:
return "Invalid GPU device index";
case LCE_INVALID_DELAY:
return "Invalid delay";
case LCE_MEM_ALLOC_FAILED:
return "Memory allocation failed";
case LCE_MEM_COPY_FAILED:
return "Memory copy failed";
case LCE_GPU_ERROR:
return "GPU error";
case LCE_UNKNOWN_ERROR:
return "Unknown error. Please contact the author(s).";
default:
return "Invalid error code";
}
}
lce_error lce_register_memory(void* ptr,
lce_size size) {
unsigned int flags = hipHostRegisterPortable;
hipError_t error = hipHostRegister(ptr, size, flags);
if( error != hipSuccess ) {
throw_cuda_error(error, LCE_GPU_ERROR);
}
return LCE_NO_ERROR;
}
lce_error lce_unregister_memory(void* ptr) {
hipError_t error = hipHostUnregister(ptr);
if( error != hipSuccess ) {
throw_cuda_error(error, LCE_GPU_ERROR);
}
return LCE_NO_ERROR;
}
| 04d08458153e72dc16021214b77f204962629c91.cu | /*
* Copyright 2013 Ben Barsdell
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
TODO: [Done]Add overlap frames to properly allow pixel delays
[Done]Add support for per-source (and pixel) delays, just like weights
[Done]Add interface for passing in and returning to device memory
Test code
CPU implementation
Benchmarks
[Done]Getter functions
[Done]Documentation
Further hardening
[Done]Python wrapper
C++ wrapper
*/
#include "light_curve_extractor.h"
#include <cstdio>
#include <cassert>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/remove.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
// Only for testing
#include "stopwatch.hpp"
typedef unsigned char uchar;
texture<uchar, cudaTextureType3D, cudaReadModeNormalizedFloat> t_data_8bit;
texture<ushort, cudaTextureType3D, cudaReadModeNormalizedFloat> t_data_16bit;
texture<float, cudaTextureType3D, cudaReadModeElementType> t_data_32bit;
// d_source_pixels contains the Cartesian coords (x,y,z) and weight (w) of
// each pixel in each cell.
template<int BLOCK_SIZE, int BITDEPTH>
__global__
void extract_light_curves_kernel(uint nframes,
uint nsources,
const uint* __restrict__ c_source_offsets,
const uint* __restrict__ c_source_npixels,
const float4* d_source_pixels,
float* d_light_curves) {
uint frame0 = threadIdx.x + blockIdx.x*BLOCK_SIZE;
uint source = blockIdx.y;
// Load constant values
uint source_offset = c_source_offsets[source];
uint source_npixels = c_source_npixels[source];
// Manually-managed shared memory cache for source pixel info
__shared__ float4 s_source_pixels[BLOCK_SIZE];
// Loop over whole grids of threads
// Note: Must pad to multiple of block size due to use of smem/syncthreads
uint nframes_padded = ((nframes - 1) / BLOCK_SIZE + 1) * BLOCK_SIZE;
for( uint frame=frame0; frame<nframes_padded; frame+=BLOCK_SIZE*gridDim.x ) {
// Sequentially sum up pixels contributing to this source
float sum = 0.f;
for( uint pb=0; pb<source_npixels; pb+=BLOCK_SIZE ) {
// Take care of the last block potentially being smaller
uint block_size = min(BLOCK_SIZE, source_npixels-pb);
// Cache a line of pixel coords/weight for this source
uint p = pb + threadIdx.x;
if( threadIdx.x < block_size ) {
s_source_pixels[threadIdx.x] = d_source_pixels[source_offset + p];
}
__syncthreads();
// Sum pixels in the block
for( uint pi=0; pi<block_size; ++pi ) {
float4 pxl = s_source_pixels[pi];
float val;
switch( BITDEPTH ) {
case 8:
val = tex3D(t_data_8bit,
pxl.x+.5f, pxl.y+.5f, pxl.z+.5f + frame);
val *= (1<<8)-1; // Un-normalise back to integer scale
break;
case 16:
val = tex3D(t_data_16bit,
pxl.x+.5f, pxl.y+.5f, pxl.z+.5f + frame);
val *= (1<<16)-1; // Un-normalise back to integer scale
break;
case 32:
val = tex3D(t_data_32bit,
pxl.x+.5f, pxl.y+.5f, pxl.z+.5f + frame);
break;
}
sum += pxl.w * val;
}
// All threads must be done reading this tile of shared memory before
// the next pb iteration overwrites it
__syncthreads();
}
// Write the summed frame back to global mem
// Note: It's now safe to crop excess threads
if( frame < nframes ) {
//d_light_curves[frame + nframes*source] = sum;
// Note: Frame-major allows stitching frames together easily, but
// here means non-coalesced memory writes. It doesn't
// appear to make a significant difference to the run time.
d_light_curves[source + nsources*frame] = sum;
}
}
}
#if defined(LCE_DEBUG) && LCE_DEBUG
#define throw_error(err) { \
fprintf(stderr, "LCE error (%s:%i): %s\n", \
__FILE__, __LINE__, \
lce_get_error_string(err)); \
return err; \
}
#define throw_cuda_error(cuda_err, err) { \
fprintf(stderr, "LCE GPU error (%s:%i): %s\n", \
__FILE__, __LINE__, \
cudaGetErrorString(cuda_err)); \
return err; \
}
#else
#define throw_error(err) return err
#define throw_cuda_error(cuda_err, err) return err
#endif
// Internal (tuning) parameters
enum {
LCE_NBUF = 2,
LCE_KERNEL_BLOCK_SIZE = 128,
LCE_DEFAULT_NFRAMES_DEVICE = 64
};
struct lce_plan_t {
lce_size width;
lce_size height;
lce_size bitdepth;
lce_size nframes_device;
int device_idx;
lce_size overlap;
thrust::device_vector<float> d_pixel_weights;
thrust::device_vector<float> d_pixel_delays;
cudaStream_t streams[LCE_NBUF];
cudaArray* a_data[LCE_NBUF];
cudaEvent_t finished_event;
lce_size nsources;
thrust::device_vector<uint> d_source_offsets;
thrust::device_vector<uint> d_source_npixels;
thrust::device_vector<float4> d_source_pixels;
thrust::device_vector<float> d_light_curves;
};
bool multiple_bits_set(unsigned int flags) {
// True only when more than one bit of flags is set (used to reject conflicting flags)
return (flags & (flags - 1)) != 0;
}
lce_error allocate_data_arrays(lce_plan plan) {
cudaError_t cuda_error;
for( int buf=0; buf<LCE_NBUF; ++buf ) {
// Free existing allocation if present
if( plan->a_data[buf] ) {
cuda_error = cudaFreeArray(plan->a_data[buf]);
if( cuda_error != cudaSuccess ) {
throw_cuda_error(cuda_error, LCE_GPU_ERROR);
}
}
// Allocate a 3D CUDA array for later binding to a texture
cudaChannelFormatKind formatkind;
switch( plan->bitdepth ) {
case 8:
case 16: formatkind = cudaChannelFormatKindUnsigned; break;
case 32: formatkind = cudaChannelFormatKindFloat; break;
default: throw_error(LCE_INVALID_BITDEPTH);
}
cudaChannelFormatDesc channel_desc =
cudaCreateChannelDesc(plan->bitdepth, 0, 0, 0,
formatkind);
cudaExtent extent = make_cudaExtent(plan->width, plan->height,
plan->nframes_device
+ plan->overlap);
unsigned int allocflags = 0;
cuda_error = cudaMalloc3DArray(&plan->a_data[buf],
&channel_desc,
extent,
allocflags);
if( cuda_error != cudaSuccess ) {
throw_cuda_error(cuda_error, LCE_MEM_ALLOC_FAILED);
}
}
return LCE_NO_ERROR;
}
lce_size lce_get_width(const lce_plan plan) { return plan->width; }
lce_size lce_get_height(const lce_plan plan) { return plan->height; }
lce_size lce_get_bitdepth(const lce_plan plan) { return plan->bitdepth; }
lce_size lce_get_nframes_device(const lce_plan plan) { return plan->nframes_device; }
int lce_get_device_idx(const lce_plan plan) { return plan->device_idx; }
lce_size lce_get_nsources(const lce_plan plan) { return plan->nsources; }
lce_size lce_get_max_delay(const lce_plan plan) { return plan->overlap; }
lce_error lce_get_pixel_weights(const lce_plan plan,
float* pixel_weights) {
thrust::copy(plan->d_pixel_weights.begin(),
plan->d_pixel_weights.end(),
pixel_weights);
return LCE_NO_ERROR;
}
lce_error lce_get_pixel_delays(const lce_plan plan,
float* pixel_delays) {
thrust::copy(plan->d_pixel_delays.begin(),
plan->d_pixel_delays.end(),
pixel_delays);
return LCE_NO_ERROR;
}
lce_error lce_set_nframes_device(lce_plan plan,
lce_size nframes_device) {
if( nframes_device == 0 ) {
throw_error(LCE_INVALID_NFRAMES);
}
plan->nframes_device = nframes_device;
lce_error err = allocate_data_arrays(plan);
if( err != LCE_NO_ERROR ) {
throw_error(err);
}
return LCE_NO_ERROR;
}
lce_error lce_create(lce_plan* plan,
lce_size width,
lce_size height,
lce_size bitdepth,
int device_idx,
const float* pixel_weights,
const float* pixel_delays) {
/*
printf("Width = %u\n", width);
printf("Height = %u\n", height);
printf("Bitdepth = %u\n", bitdepth);
printf("Device = %i\n", device_idx);
printf("Weights = %p\n", pixel_weights);
printf("Delays = %p\n", pixel_delays);
*/
if( !(bitdepth == 8 ||
bitdepth == 16 ||
bitdepth == 32) ) {
throw_error(LCE_INVALID_BITDEPTH);
}
lce_plan newplan = new lce_plan_t;
if( !newplan ) {
throw_error(LCE_MEM_ALLOC_FAILED);
}
// TODO: Do careful clean-up of dynamic allocations in here when
// something fails mid-way through the function.
newplan->width = width;
newplan->height = height;
newplan->bitdepth = bitdepth;
newplan->nframes_device = LCE_DEFAULT_NFRAMES_DEVICE;
newplan->device_idx = device_idx;
newplan->nsources = 0;
cudaError_t cuda_error;
// Create a CUDA event to record when computation has finished
cuda_error = cudaEventCreateWithFlags(&newplan->finished_event,
cudaEventDisableTiming |
cudaEventBlockingSync);
if( cuda_error != cudaSuccess ) {
throw_cuda_error(cuda_error, LCE_GPU_ERROR);
}
cuda_error = cudaSetDevice(device_idx);
if( cuda_error != cudaSuccess ) {
if( cuda_error == cudaErrorInvalidDevice ) {
throw_cuda_error(cuda_error, LCE_INVALID_DEVICE);
}
else {
throw_cuda_error(cuda_error, LCE_GPU_ERROR);
}
}
// TODO: Check the assign and resize calls for exceptions
if( pixel_weights ) {
newplan->d_pixel_weights.assign(pixel_weights,
pixel_weights + width*height);
}
if( pixel_delays ) {
newplan->d_pixel_delays.assign(pixel_delays,
pixel_delays + width*height);
float max_delay = *thrust::max_element(newplan->d_pixel_delays.begin(),
newplan->d_pixel_delays.end());
newplan->overlap = ceil(max_delay);
}
else {
newplan->overlap = 0;
}
for( int buf=0; buf<LCE_NBUF; ++buf ) {
cudaStreamCreate(&newplan->streams[buf]);
newplan->a_data[buf] = 0;
}
lce_error err = allocate_data_arrays(newplan);
if( err != LCE_NO_ERROR ) {
throw_error(err);
}
*plan = newplan;
// Set textures to return 0 when out-of-bounds, and to use
// linear interpolation.
t_data_8bit.addressMode[0] = cudaAddressModeBorder;
t_data_8bit.addressMode[1] = cudaAddressModeBorder;
t_data_8bit.addressMode[2] = cudaAddressModeBorder;
t_data_8bit.filterMode = cudaFilterModeLinear;
t_data_8bit.normalized = false;
t_data_16bit.addressMode[0] = cudaAddressModeBorder;
t_data_16bit.addressMode[1] = cudaAddressModeBorder;
t_data_16bit.addressMode[2] = cudaAddressModeBorder;
t_data_16bit.filterMode = cudaFilterModeLinear;
t_data_16bit.normalized = false;
t_data_32bit.addressMode[0] = cudaAddressModeBorder;
t_data_32bit.addressMode[1] = cudaAddressModeBorder;
t_data_32bit.addressMode[2] = cudaAddressModeBorder;
t_data_32bit.filterMode = cudaFilterModeLinear;
t_data_32bit.normalized = false;
return LCE_NO_ERROR;
}
void lce_destroy(lce_plan plan) {
if( !plan ) {
return;
}
cudaSetDevice(plan->device_idx);
cudaEventDestroy(plan->finished_event);
for( int buf=0; buf<LCE_NBUF; ++buf ) {
cudaStreamDestroy(plan->streams[buf]);
cudaFreeArray(plan->a_data[buf]);
}
delete plan;
}
template<typename T>
struct abs_less_equal_val : public thrust::unary_function<T,bool> {
T val;
abs_less_equal_val(T val_) : val(val_) {}
inline __host__ __device__
bool operator()(T x) const {
return fabs(x) <= val;
}
};
template<typename T>
struct multiply_by : public thrust::unary_function<T,T> {
T val;
multiply_by(T val_) : val(val_) {}
inline __host__ __device__
T operator()(T x) const {
return x * val;
}
};
struct get_spatial_sort_index : public thrust::unary_function<uint, uint> {
uint imsize;
uint width;
get_spatial_sort_index(uint imsize_, uint width_)
: imsize(imsize_), width(width_) {}
// This code was copied from http://graphics.stanford.edu/~seander/bithacks.html
inline __host__ __device__
uint get_zindex(uint x, uint y) const {
const uint B[] = {0x55555555, 0x33333333, 0x0F0F0F0F, 0x00FF00FF};
const uint S[] = {1, 2, 4, 8};
// Interleave lower 16 bits of x and y, so the bits of x
// are in the even positions and bits from y in the odd;
// x and y must initially be less than 65536.
x = (x | (x << S[3])) & B[3];
x = (x | (x << S[2])) & B[2];
x = (x | (x << S[1])) & B[1];
x = (x | (x << S[0])) & B[0];
y = (y | (y << S[3])) & B[3];
y = (y | (y << S[2])) & B[2];
y = (y | (y << S[1])) & B[1];
y = (y | (y << S[0])) & B[0];
uint z = x | (y << 1);
return z;
}
inline __host__ __device__
uint operator()(uint idx) const {
uint src_idx = idx / imsize;
uint pxl_idx = idx % imsize;
uint x = pxl_idx % width;
uint y = pxl_idx / width;
uint zindex = get_zindex(x, y);
uint sort_index = zindex + imsize*src_idx;
return sort_index;
}
};
struct gen_source_pixel_table
: public thrust::binary_function<uint, void, float4> {
uint imsize;
uint width;
const float* pixel_delays;
const float* pixel_weights;
gen_source_pixel_table(uint imsize_, uint width_,
const float* pixel_delays_,
const float* pixel_weights_)
: imsize(imsize_), width(width_),
pixel_delays(pixel_delays_),
pixel_weights(pixel_weights_) {}
template<typename Tuple>
inline __host__ __device__
float4 operator()(uint idx, Tuple wd) const {
float weight = thrust::get<0>(wd);
float delay = thrust::get<1>(wd);
uint pxl_idx = idx % imsize;
float4 pxl;
pxl.x = pxl_idx % width;
pxl.y = pxl_idx / width;
pxl.z = delay + pixel_delays[pxl_idx];
pxl.w = weight * pixel_weights[pxl_idx];
return pxl;
}
};
template<typename T>
struct z_less_than : public thrust::binary_function<T,T,bool> {
inline __host__ __device__
bool operator()(T a, T b) const {
return a.z < b.z;
}
};
lce_error lce_set_source_weights_by_image(lce_plan plan,
lce_size nsources,
const float* source_weights,
float zero_thresh,
const float* source_delays) {
if( !plan ) {
throw_error(LCE_INVALID_PLAN);
}
cudaSetDevice(plan->device_idx);
plan->nsources = nsources;
// TODO: It's possible that some applications will actually have few/no
// zeros in the source weights. In these cases, we could actually
// store the dense source_weights matrices and use sgemm instead
// of the custom 'sparse weights' kernel.
// Would have to guess or autotune the tipping point between the
// efficiency of the two algorithms.
using thrust::make_counting_iterator;
using thrust::make_transform_iterator;
using thrust::make_zip_iterator;
using thrust::make_tuple;
// Copy weights to device
size_t imsize = plan->width * plan->height;
thrust::device_vector<float> d_weights(source_weights,
source_weights + nsources*imsize);
// Copy delays to device
thrust::device_vector<float> d_delays;
if( !source_delays ) {
d_delays.resize(nsources*imsize, 0.f);
}
else {
d_delays.assign(source_delays,
source_delays + nsources*imsize);
}
// Compact a list of indices by removing zero weights
thrust::device_vector<uint> d_inds(nsources*imsize);
thrust::device_vector<uint>::iterator end_iter;
end_iter = thrust::remove_copy_if(make_counting_iterator<uint>(0),
make_counting_iterator<uint>(nsources*imsize),
d_weights.begin(),
d_inds.begin(),
abs_less_equal_val<float>(zero_thresh));
d_inds.resize(end_iter - d_inds.begin());
// Find each source's offset into the compacted list of indices
plan->d_source_offsets.resize(nsources);
thrust::lower_bound(d_inds.begin(), d_inds.end(),
make_transform_iterator(make_counting_iterator<uint>(0),
multiply_by<uint>(imsize)),
make_transform_iterator(make_counting_iterator<uint>(nsources),
multiply_by<uint>(imsize)),
plan->d_source_offsets.begin());
// Difference adjacent offsets to find the number of pixels in each source
plan->d_source_npixels.resize(nsources);
plan->d_source_offsets.push_back(d_inds.size());
thrust::transform(plan->d_source_offsets.begin()+1,
plan->d_source_offsets.end(),
plan->d_source_offsets.begin(),
plan->d_source_npixels.begin(),
thrust::minus<uint>());
// Spatially sort inds (e.g., by Z order)
// TODO: This has yet to prove its worth
thrust::device_vector<uint> d_spatial_sort_keys(d_inds.size());
thrust::transform(d_inds.begin(), d_inds.end(),
d_spatial_sort_keys.begin(),
get_spatial_sort_index(imsize, plan->width));
thrust::sort_by_key(d_spatial_sort_keys.begin(),
d_spatial_sort_keys.end(),
d_inds.begin());
if( plan->d_pixel_delays.empty() ) {
plan->d_pixel_delays.resize(imsize, 0.f);
}
if( plan->d_pixel_weights.empty() ) {
plan->d_pixel_weights.resize(imsize, 1.f);
}
// Generate source pixel lookup values as:
// float4(col, row, delay, weight*pixel_weight)
const float* d_pixel_delays_ptr = thrust::raw_pointer_cast(&plan->d_pixel_delays[0]);
const float* d_pixel_weights_ptr = thrust::raw_pointer_cast(&plan->d_pixel_weights[0]);
plan->d_source_pixels.resize(d_inds.size());
thrust::transform(d_inds.begin(), d_inds.end(),
make_permutation_iterator(make_zip_iterator(make_tuple(d_weights.begin(),
d_delays.begin())),
d_inds.begin()),
plan->d_source_pixels.begin(),
gen_source_pixel_table(imsize, plan->width,
d_pixel_delays_ptr,
d_pixel_weights_ptr));
// Adjust required overlap based on actual max delay
float4 max_val = *thrust::max_element(plan->d_source_pixels.begin(),
plan->d_source_pixels.end(),
z_less_than<float4>());
float max_delay = max_val.z;
plan->overlap = ceil(max_delay);
// Check for illegal negative delays
float4 min_val = *thrust::min_element(plan->d_source_pixels.begin(),
plan->d_source_pixels.end(),
z_less_than<float4>());
float min_delay = min_val.z;
if( min_delay < 0.f ) {
throw_error(LCE_INVALID_DELAY);
}
// Allocate output memory space
plan->d_light_curves.resize(nsources*plan->nframes_device);
return LCE_NO_ERROR;
}
/*
// TODO: Implement this if there is a motivating use-case
int lce_set_source_weights_by_pixel(lce_plan plan,
lce_size nsources,
const lce_size* source_npixels,
const int** source_coords,
const float** source_weights) {
}
*/
lce_error copy_input(const lce_plan plan,
const void* data,
int buf,
unsigned int flags) {
assert(plan != 0);
cudaError_t error;
cudaStream_t stream = plan->streams[buf];
error = cudaGetLastError();
if( error != cudaSuccess ) {
throw_cuda_error(error, LCE_MEM_COPY_FAILED);
}
cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_cudaPitchedPtr((void*)data,
plan->width*plan->bitdepth/8,
plan->width,
plan->height);
copyParams.dstArray = plan->a_data[buf];
copyParams.extent = make_cudaExtent(plan->width,
plan->height,
plan->nframes_device + plan->overlap);
if( !(flags & LCE_INPUT_MASK) ) {
flags |= LCE_DEFAULT_INPUT;
}
else if( multiple_bits_set(flags & LCE_INPUT_MASK) ) {
throw_error(LCE_INVALID_FLAGS);
}
switch( flags ) {
case LCE_HOST_INPUT: copyParams.kind = cudaMemcpyHostToDevice; break;
case LCE_DEVICE_INPUT: copyParams.kind = cudaMemcpyDeviceToDevice; break;
default: copyParams.kind = cudaMemcpyHostToDevice;
}
error = cudaMemcpy3DAsync(&copyParams, stream);
if( error != cudaSuccess ) {
throw_cuda_error(error, LCE_MEM_COPY_FAILED);
}
return LCE_NO_ERROR;
}
lce_error compute(const lce_plan plan,
float* light_curves,
int buf,
unsigned int flags) {
assert(plan != 0);
enum { BLOCK_SIZE = LCE_KERNEL_BLOCK_SIZE };
cudaStream_t stream = plan->streams[buf];
cudaError_t cuda_error;
cudaArray* a_data = plan->a_data[buf];
const lce_size* d_source_offsets_ptr = thrust::raw_pointer_cast(&plan->d_source_offsets[0]);
const lce_size* d_source_npixels_ptr = thrust::raw_pointer_cast(&plan->d_source_npixels[0]);
const float4* d_source_pixels_ptr = thrust::raw_pointer_cast(&plan->d_source_pixels[0]);
float* d_light_curves_ptr = thrust::raw_pointer_cast(&plan->d_light_curves[0]);
//Stopwatch timer;
//timer.start();
// TODO: Does this slow things down significantly? Could it be done during init, with separate textures for each buf?
switch( plan->bitdepth ) {
case 8: cuda_error = cudaBindTextureToArray(t_data_8bit, a_data); break;
case 16: cuda_error = cudaBindTextureToArray(t_data_16bit, a_data); break;
case 32: cuda_error = cudaBindTextureToArray(t_data_32bit, a_data); break;
default: throw_error(LCE_INVALID_BITDEPTH);
}
if( cuda_error != cudaSuccess ) {
throw_cuda_error(cuda_error, LCE_GPU_ERROR);
}
//timer.stop();
//printf("Texture bind time = %f\n", timer.getTime());
// Compute thread decomposition
size_t nframe_blocks = (plan->nframes_device - 1) / BLOCK_SIZE + 1;
dim3 block(BLOCK_SIZE);
dim3 grid(nframe_blocks,
plan->nsources);
// Dynamically dispatch on bitdepth and execute GPU kernel
switch( plan->bitdepth ) {
case 8: extract_light_curves_kernel<BLOCK_SIZE, 8><<<grid, block, 0, stream>>>
(plan->nframes_device,plan->nsources,d_source_offsets_ptr,
d_source_npixels_ptr,d_source_pixels_ptr,d_light_curves_ptr); break;
case 16: extract_light_curves_kernel<BLOCK_SIZE, 16><<<grid, block, 0, stream>>>
(plan->nframes_device,plan->nsources,d_source_offsets_ptr,
d_source_npixels_ptr,d_source_pixels_ptr,d_light_curves_ptr); break;
case 32: extract_light_curves_kernel<BLOCK_SIZE, 32><<<grid, block, 0, stream>>>
(plan->nframes_device,plan->nsources,d_source_offsets_ptr,
d_source_npixels_ptr,d_source_pixels_ptr,d_light_curves_ptr); break;
default: throw_error(LCE_INVALID_BITDEPTH);
}
#if defined(LCE_DEBUG) && LCE_DEBUG
// Note: Error-checking the kernel disables asynchronous execution
cudaStreamSynchronize(stream);
cuda_error = cudaGetLastError();
if( cuda_error != cudaSuccess ) {
throw_cuda_error(cuda_error, LCE_GPU_ERROR);
}
#endif
// Copy results back to host
if( !(flags & LCE_OUTPUT_MASK) ) {
flags = LCE_DEFAULT_OUTPUT;
}
else if( multiple_bits_set(flags & LCE_OUTPUT_MASK) ) {
throw_error(LCE_INVALID_FLAGS);
}
cudaMemcpyKind memcpykind;
switch( flags ) {
case LCE_HOST_OUTPUT: memcpykind = cudaMemcpyDeviceToHost; break;
case LCE_DEVICE_OUTPUT: memcpykind = cudaMemcpyDeviceToDevice; break;
default: memcpykind = cudaMemcpyDeviceToHost;
}
size_t light_curve_nbytes = (plan->nframes_device * plan->nsources
* sizeof(float));
cuda_error = cudaMemcpyAsync((void*)light_curves, (void*)d_light_curves_ptr,
light_curve_nbytes, memcpykind,
stream);
if( cuda_error != cudaSuccess ) {
throw_cuda_error(cuda_error, LCE_MEM_COPY_FAILED);
}
return LCE_NO_ERROR;
}
lce_error lce_execute(const lce_plan plan,
lce_size nframes,
const void* data,
float* light_curves,
unsigned int flags) {
if( !plan ) {
throw_error(LCE_INVALID_PLAN);
}
lce_size nframes_computed = nframes - plan->overlap;
if( nframes_computed % plan->nframes_device != 0 ) {
throw_error(LCE_INVALID_NFRAMES);
}
// TODO: Remove this when done benchmarking (or integrate properly)
Stopwatch timer;
timer.start();
cudaSetDevice(plan->device_idx);
size_t npipe = nframes_computed / plan->nframes_device;
size_t frame_nbytes = plan->width*plan->height*plan->bitdepth/8;
size_t in_stride = plan->nframes_device * frame_nbytes;
size_t out_stride = plan->nframes_device * plan->nsources;
size_t pipe = 0;
copy_input(plan, (char*)data + pipe*in_stride,
pipe % LCE_NBUF, flags);
while( pipe < npipe-1 ) {
copy_input(plan, (char*)data + (pipe+1)*in_stride,
(pipe+1) % LCE_NBUF, flags);
compute(plan, light_curves + pipe*out_stride,
pipe % LCE_NBUF, flags);
++pipe;
}
compute(plan, light_curves + pipe*out_stride,
pipe % LCE_NBUF, flags);
// Record an event so we can check when computation is finished
cudaEventRecord(plan->finished_event, 0);
if( !(flags & LCE_SYNC_MASK) ) {
flags |= LCE_DEFAULT_SYNC;
}
else if( multiple_bits_set(flags & LCE_SYNC_MASK) ) {
throw_error(LCE_INVALID_FLAGS);
}
if( flags & LCE_SYNC ) {
lce_error err = lce_synchronize(plan);
if( err != LCE_NO_ERROR ) {
throw_error(err);
}
}
// TODO: Remove this when done benchmarking (or integrate properly)
timer.stop();
printf("lce_execute time = %f s\n", timer.getTime());
printf(" = %f fps\n", (nframes-plan->overlap)/timer.getTime());
return LCE_NO_ERROR;
}
lce_error lce_query_status(const lce_plan plan) {
cudaSetDevice(plan->device_idx);
cudaError_t cuda_error = cudaEventQuery(plan->finished_event);
if( cuda_error == cudaSuccess ) {
return LCE_NO_ERROR;
}
else if( cuda_error == cudaErrorNotReady ) {
return LCE_NOT_READY;
}
else {
throw_cuda_error(cuda_error, LCE_GPU_ERROR);
}
}
lce_error lce_synchronize(const lce_plan plan) {
cudaSetDevice(plan->device_idx);
cudaEventSynchronize(plan->finished_event);
cudaError_t error = cudaGetLastError();
if( error != cudaSuccess ) {
throw_cuda_error(error, LCE_GPU_ERROR);
}
return LCE_NO_ERROR;
}
const char* lce_get_error_string(lce_error error) {
switch( error ) {
case LCE_NO_ERROR:
return "No error";
case LCE_INVALID_PLAN:
return "Invalid plan";
case LCE_INVALID_BITDEPTH:
return "Invalid bitdepth";
case LCE_INVALID_NFRAMES:
return "Invalid nframes";
case LCE_INVALID_DEVICE:
return "Invalid GPU device index";
case LCE_INVALID_DELAY:
return "Invalid delay";
case LCE_MEM_ALLOC_FAILED:
return "Memory allocation failed";
case LCE_MEM_COPY_FAILED:
return "Memory copy failed";
case LCE_GPU_ERROR:
return "GPU error";
case LCE_UNKNOWN_ERROR:
return "Unknown error. Please contact the author(s).";
default:
return "Invalid error code";
}
}
lce_error lce_register_memory(void* ptr,
lce_size size) {
unsigned int flags = cudaHostRegisterPortable;
cudaError_t error = cudaHostRegister(ptr, size, flags);
if( error != cudaSuccess ) {
throw_cuda_error(error, LCE_GPU_ERROR);
}
return LCE_NO_ERROR;
}
lce_error lce_unregister_memory(void* ptr) {
cudaError_t error = cudaHostUnregister(ptr);
if( error != cudaSuccess ) {
throw_cuda_error(error, LCE_GPU_ERROR);
}
return LCE_NO_ERROR;
}
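// A minimal host-side usage sketch of the API implemented above. The chosen
// bitdepth (8), device index (0), flag combination and buffer layout are
// illustrative assumptions, not requirements of the API; nframes must still
// satisfy the multiple-of-nframes_device check performed inside lce_execute.
int example_light_curve_extraction(const void* video_frames, // width*height*nframes samples
                                   lce_size width,
                                   lce_size height,
                                   lce_size nframes,
                                   lce_size nsources,
                                   const float* source_weight_images, // nsources weight images
                                   float* light_curves) { // nsources*nframes output samples
    lce_plan plan = 0;
    lce_error err = lce_create(&plan, width, height, 8, 0, NULL, NULL);
    if( err != LCE_NO_ERROR ) {
        fprintf(stderr, "lce_create: %s\n", lce_get_error_string(err));
        return -1;
    }
    err = lce_set_source_weights_by_image(plan, nsources, source_weight_images,
                                          0.f, NULL);
    if( err == LCE_NO_ERROR ) {
        err = lce_execute(plan, nframes, video_frames, light_curves,
                          LCE_HOST_INPUT | LCE_HOST_OUTPUT | LCE_SYNC);
    }
    if( err != LCE_NO_ERROR ) {
        fprintf(stderr, "LCE error: %s\n", lce_get_error_string(err));
    }
    lce_destroy(plan);
    return err == LCE_NO_ERROR ? 0 : -1;
}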
|
dc5a35f473a1c4338400dbd5909d340c6a49e7a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//to compile with global memory:
//nvcc -O3 -gencode arch=compute_52,code=sm_52 -DUSE_GLOBAL surf2Dwrite_ex.cu
//to compile with surface memory:
//nvcc -O3 -gencode arch=compute_52,code=sm_52 surf2Dwrite_ex.cu
#include <stdio.h>
#include <iostream>
typedef float mytype;
const int blk_dim=16;
#define my_N 1000
#define A_VAL 1
#define B_VAL 2
surface < void, 2 > a_surf;
surface < void, 2 > b_surf;
surface < void, 2 > c_surf;
void CUDA_SAFE_CALL(hipError_t call, int line) {
switch (call) {
case hipSuccess:
break;
default:
printf("ERROR at line :%i.%d' ' %s\n",
line, call, hipGetErrorString(call));
exit(-1);
break;
}
}
#ifdef USE_GLOBAL
__global__ void mul(const mytype * __restrict__ d_a, const mytype * __restrict__ d_b, mytype * __restrict__ d_c, const int N)
#else
__global__ void mul(const int N)
#endif
{
mytype a, b, c, temp;
int i;
unsigned int x = blockIdx.x * blockDim.x + (threadIdx.x);
unsigned int y = blockIdx.y * blockDim.y + (threadIdx.y);
if (x < N && y < N) {
temp = 0;
for (i = 0; i < N; i++) {
#ifdef USE_GLOBAL
a = d_a[x*N+i];
b = d_b[i*N+y];
#else
surf2Dread( & a, a_surf, (x) * sizeof(mytype), i);
surf2Dread( & b, b_surf, (i) * sizeof(mytype), y);
#endif
temp += a * b;
}
c = temp;
#ifdef USE_GLOBAL
d_c[x*N+y] = c;
#else
// Write to output surface
surf2Dwrite(c, c_surf, x * sizeof(mytype), y);
#endif
}
}
int main() {
const int N = my_N;
mytype *a, *b, *c, *d_a, *d_b, *d_c;
int i, j;
clock_t t1, t2;
hipArray * da, * db, * dc;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc < mytype > ();
dim3 dimBlock(blk_dim, blk_dim);
dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x, (N+dimBlock.y-1)/dimBlock.y);
int s = N * N * sizeof(mytype);
a = (mytype *)malloc(s);
b = (mytype *)malloc(s);
c = (mytype *)malloc(s);
CUDA_SAFE_CALL(hipMalloc(&d_a, s), __LINE__);
CUDA_SAFE_CALL(hipMalloc(&d_b, s), __LINE__);
CUDA_SAFE_CALL(hipMalloc(&d_c, s), __LINE__);
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
a[i*N+j] = A_VAL;
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
b[i*N+j] = B_VAL;
CUDA_SAFE_CALL(hipMallocArray( & da, & channelDesc, N, N, hipArraySurfaceLoadStore), __LINE__);
CUDA_SAFE_CALL(hipMallocArray( & db, & channelDesc, N, N, hipArraySurfaceLoadStore), __LINE__);
CUDA_SAFE_CALL(hipMallocArray( & dc, & channelDesc, N, N, hipArraySurfaceLoadStore), __LINE__);
CUDA_SAFE_CALL(hipMemcpyToArray(da, 0, 0, a, s, hipMemcpyHostToDevice), __LINE__);
CUDA_SAFE_CALL(hipMemcpyToArray(db, 0, 0, b, s, hipMemcpyHostToDevice), __LINE__);
CUDA_SAFE_CALL(hipBindSurfaceToArray(a_surf, da), __LINE__);
CUDA_SAFE_CALL(hipBindSurfaceToArray(b_surf, db), __LINE__);
CUDA_SAFE_CALL(hipBindSurfaceToArray(c_surf, dc), __LINE__);
#ifdef USE_GLOBAL
CUDA_SAFE_CALL(hipMemcpy(d_a, a, s, hipMemcpyHostToDevice), __LINE__);
CUDA_SAFE_CALL(hipMemcpy(d_b, b, s, hipMemcpyHostToDevice), __LINE__);
#endif
t1 = clock();
#ifdef USE_GLOBAL
hipLaunchKernelGGL(( mul) , dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, N);
#else
hipLaunchKernelGGL(( mul) , dim3(dimGrid), dim3(dimBlock), 0, 0, N);
#endif
hipDeviceSynchronize();
t2 = clock();
CUDA_SAFE_CALL(hipMemcpyFromArray(c, dc, 0, 0, s, hipMemcpyDeviceToHost), __LINE__);
#ifdef USE_GLOBAL
CUDA_SAFE_CALL(hipMemcpy(c, d_c, s, hipMemcpyDeviceToHost), __LINE__);
#endif
double t3 = (double) t2 - (double) t1;
t3 = t3 / CLOCKS_PER_SEC;
printf("\n CUDA time :%lf\n", t3);
for (i=0; i < N*N; i++)
if(c[i] != A_VAL*B_VAL*N) {std::cout << "mismatch at: " << i << ", was: " << c[i] << " should be: " << A_VAL*B_VAL*N << std::endl; return 1;}
CUDA_SAFE_CALL(hipFreeArray(da), __LINE__);
CUDA_SAFE_CALL(hipFreeArray(db), __LINE__);
CUDA_SAFE_CALL(hipFreeArray(dc), __LINE__);
std::cout << "Success!" << std::endl;
return 0;
}
| dc5a35f473a1c4338400dbd5909d340c6a49e7a6.cu | //to compile with global memory:
//nvcc -O3 -gencode arch=compute_52,code=sm_52 -DUSE_GLOBAL surf2Dwrite_ex.cu
//to compile with surface memory:
//nvcc -O3 -gencode arch=compute_52,code=sm_52 surf2Dwrite_ex.cu
#include <stdio.h>
#include <iostream>
typedef float mytype;
const int blk_dim=16;
#define my_N 1000
#define A_VAL 1
#define B_VAL 2
surface < void, 2 > a_surf;
surface < void, 2 > b_surf;
surface < void, 2 > c_surf;
void CUDA_SAFE_CALL(cudaError_t call, int line) {
switch (call) {
case cudaSuccess:
break;
default:
printf("ERROR at line :%i.%d' ' %s\n",
line, call, cudaGetErrorString(call));
exit(-1);
break;
}
}
#ifdef USE_GLOBAL
__global__ void mul(const mytype * __restrict__ d_a, const mytype * __restrict__ d_b, mytype * __restrict__ d_c, const int N)
#else
__global__ void mul(const int N)
#endif
{
mytype a, b, c, temp;
int i;
unsigned int x = blockIdx.x * blockDim.x + (threadIdx.x);
unsigned int y = blockIdx.y * blockDim.y + (threadIdx.y);
if (x < N && y < N) {
temp = 0;
for (i = 0; i < N; i++) {
#ifdef USE_GLOBAL
a = d_a[x*N+i];
b = d_b[i*N+y];
#else
surf2Dread( & a, a_surf, (x) * sizeof(mytype), i);
surf2Dread( & b, b_surf, (i) * sizeof(mytype), y);
#endif
temp += a * b;
}
c = temp;
#ifdef USE_GLOBAL
d_c[x*N+y] = c;
#else
// Write to output surface
surf2Dwrite(c, c_surf, x * sizeof(mytype), y);
#endif
}
}
int main() {
const int N = my_N;
mytype *a, *b, *c, *d_a, *d_b, *d_c;
int i, j;
clock_t t1, t2;
cudaArray * da, * db, * dc;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc < mytype > ();
dim3 dimBlock(blk_dim, blk_dim);
dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x, (N+dimBlock.y-1)/dimBlock.y);
int s = N * N * sizeof(mytype);
a = (mytype *)malloc(s);
b = (mytype *)malloc(s);
c = (mytype *)malloc(s);
CUDA_SAFE_CALL(cudaMalloc(&d_a, s), __LINE__);
CUDA_SAFE_CALL(cudaMalloc(&d_b, s), __LINE__);
CUDA_SAFE_CALL(cudaMalloc(&d_c, s), __LINE__);
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
a[i*N+j] = A_VAL;
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
b[i*N+j] = B_VAL;
CUDA_SAFE_CALL(cudaMallocArray( & da, & channelDesc, N, N, cudaArraySurfaceLoadStore), __LINE__);
CUDA_SAFE_CALL(cudaMallocArray( & db, & channelDesc, N, N, cudaArraySurfaceLoadStore), __LINE__);
CUDA_SAFE_CALL(cudaMallocArray( & dc, & channelDesc, N, N, cudaArraySurfaceLoadStore), __LINE__);
CUDA_SAFE_CALL(cudaMemcpyToArray(da, 0, 0, a, s, cudaMemcpyHostToDevice), __LINE__);
CUDA_SAFE_CALL(cudaMemcpyToArray(db, 0, 0, b, s, cudaMemcpyHostToDevice), __LINE__);
CUDA_SAFE_CALL(cudaBindSurfaceToArray(a_surf, da), __LINE__);
CUDA_SAFE_CALL(cudaBindSurfaceToArray(b_surf, db), __LINE__);
CUDA_SAFE_CALL(cudaBindSurfaceToArray(c_surf, dc), __LINE__);
#ifdef USE_GLOBAL
CUDA_SAFE_CALL(cudaMemcpy(d_a, a, s, cudaMemcpyHostToDevice), __LINE__);
CUDA_SAFE_CALL(cudaMemcpy(d_b, b, s, cudaMemcpyHostToDevice), __LINE__);
#endif
t1 = clock();
#ifdef USE_GLOBAL
mul <<<dimGrid, dimBlock>>> (d_a, d_b, d_c, N);
#else
mul <<<dimGrid, dimBlock>>> (N);
#endif
cudaDeviceSynchronize();
t2 = clock();
CUDA_SAFE_CALL(cudaMemcpyFromArray(c, dc, 0, 0, s, cudaMemcpyDeviceToHost), __LINE__);
#ifdef USE_GLOBAL
CUDA_SAFE_CALL(cudaMemcpy(c, d_c, s, cudaMemcpyDeviceToHost), __LINE__);
#endif
double t3 = (double) t2 - (double) t1;
t3 = t3 / CLOCKS_PER_SEC;
printf("\n CUDA time :%lf\n", t3);
for (i=0; i < N*N; i++)
if(c[i] != A_VAL*B_VAL*N) {std::cout << "mismatch at: " << i << ", was: " << c[i] << " should be: " << A_VAL*B_VAL*N << std::endl; return 1;}
CUDA_SAFE_CALL(cudaFreeArray(da), __LINE__);
CUDA_SAFE_CALL(cudaFreeArray(db), __LINE__);
CUDA_SAFE_CALL(cudaFreeArray(dc), __LINE__);
std::cout << "Success!" << std::endl;
return 0;
}
|
7f58a277959db89e9289a951e0fae3823d353755.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
__global__ void add(int *a, int *b, int *c) {
*c = *a + *b;
}
int main(void) {
int a, b, c; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c
int size = sizeof(int);
// Allocate space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// Setup input values
a = 2;
b = 7;
// Copy inputs to device
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
// Launch add() kernel on GPU
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c);
// Copy result back to host
hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
printf("%d\n", c);
// Cleanup
hipFree(d_a); hipFree(d_b); hipFree(d_c);
return 0;
}
| 7f58a277959db89e9289a951e0fae3823d353755.cu | #include <iostream>
#include <stdio.h>
__global__ void add(int *a, int *b, int *c) {
*c = *a + *b;
}
int main(void) {
int a, b, c; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c
int size = sizeof(int);
// Allocate space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Setup input values
a = 2;
b = 7;
// Copy inputs to device
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
// Launch add() kernel on GPU
add<<<1,1>>>(d_a, d_b, d_c);
// Copy result back to host
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
printf("%d\n", c);
// Cleanup
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
}
|
06b3b76a03a556383c9f8bc093b351f8e2a4b8bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <stdio.h>
#include "template.hu"
#define BLOCK_SIZE 512
#define WARP_SIZE 32
#define NUM_WARPS (BLOCK_SIZE / WARP_SIZE)
// Maximum number of elements that can be inserted into a block queue
#define BQ_CAPACITY 2048
// Maximum number of elements that can be inserted into a warp queue
#define WQ_CAPACITY 128
/******************************************************************************
GPU kernels
*******************************************************************************/
__global__ void gpu_global_queuing_kernel(unsigned int *nodePtrs,
unsigned int *nodeNeighbors,
unsigned int *nodeVisited,
unsigned int *currLevelNodes,
unsigned int *nextLevelNodes,
unsigned int *numCurrLevelNodes,
unsigned int *numNextLevelNodes) {
// INSERT KERNEL CODE HERE
// Loop over all nodes in the current level
// Loop over all neighbors of the node
// If the neighbor hasn't been visited yet
// Add it to the global queue
}
__global__ void gpu_block_queuing_kernel(unsigned int *nodePtrs,
unsigned int *nodeNeighbors,
unsigned int *nodeVisited,
unsigned int *currLevelNodes,
unsigned int *nextLevelNodes,
unsigned int *numCurrLevelNodes,
unsigned int *numNextLevelNodes) {
// INSERT KERNEL CODE HERE
// Initialize shared memory queue
// Loop over all nodes in the current level
// Loop over all neighbors of the node
// If the neighbor hasn't been visited yet
// Add it to the block queue
// If full, add it to the global queue
// Calculate space for block queue to go into global queue
// Store block queue in global queue
}
__global__ void gpu_warp_queuing_kernel(unsigned int *nodePtrs,
unsigned int *nodeNeighbors,
unsigned int *nodeVisited,
unsigned int *currLevelNodes,
unsigned int *nextLevelNodes,
unsigned int *numCurrLevelNodes,
unsigned int *numNextLevelNodes) {
// INSERT KERNEL CODE HERE
// This version uses one queue per warp
// Initialize shared memory queue
// Loop over all nodes in the current level
// Loop over all neighbors of the node
// If the neighbor hasn't been visited yet
// Add it to the warp queue
// If full, add it to the block queue
// If full, add it to the global queue
// Calculate space for warp queue to go into block queue
// Store warp queue in block queue
// If full, add it to the global queue
// Calculate space for block queue to go into global queue
// Saturate block queue counter
// Calculate space for global queue
// Store block queue in global queue
}
/******************************************************************************
Functions
*******************************************************************************/
// DO NOT MODIFY THESE FUNCTIONS!
void gpu_global_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors,
unsigned int *nodeVisited, unsigned int *currLevelNodes,
unsigned int *nextLevelNodes,
unsigned int *numCurrLevelNodes,
unsigned int *numNextLevelNodes) {
const unsigned int numBlocks = 45;
gpu_global_queuing_kernel << <numBlocks, BLOCK_SIZE>>>
(nodePtrs, nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes,
numCurrLevelNodes, numNextLevelNodes);
}
void gpu_block_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors,
unsigned int *nodeVisited, unsigned int *currLevelNodes,
unsigned int *nextLevelNodes,
unsigned int *numCurrLevelNodes,
unsigned int *numNextLevelNodes) {
const unsigned int numBlocks = 45;
gpu_block_queuing_kernel << <numBlocks, BLOCK_SIZE>>>
(nodePtrs, nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes,
numCurrLevelNodes, numNextLevelNodes);
}
void gpu_warp_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors,
unsigned int *nodeVisited, unsigned int *currLevelNodes,
unsigned int *nextLevelNodes,
unsigned int *numCurrLevelNodes,
unsigned int *numNextLevelNodes) {
const unsigned int numBlocks = 45;
gpu_warp_queuing_kernel << <numBlocks, BLOCK_SIZE>>>
(nodePtrs, nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes,
numCurrLevelNodes, numNextLevelNodes);
}
| 06b3b76a03a556383c9f8bc093b351f8e2a4b8bc.cu | #include <cstdio>
#include <cstdlib>
#include <stdio.h>
#include "template.hu"
#define BLOCK_SIZE 512
#define WARP_SIZE 32
#define NUM_WARPS (BLOCK_SIZE / WARP_SIZE)
// Maximum number of elements that can be inserted into a block queue
#define BQ_CAPACITY 2048
// Maximum number of elements that can be inserted into a warp queue
#define WQ_CAPACITY 128
/******************************************************************************
GPU kernels
*******************************************************************************/
__global__ void gpu_global_queuing_kernel(unsigned int *nodePtrs,
unsigned int *nodeNeighbors,
unsigned int *nodeVisited,
unsigned int *currLevelNodes,
unsigned int *nextLevelNodes,
unsigned int *numCurrLevelNodes,
unsigned int *numNextLevelNodes) {
// INSERT KERNEL CODE HERE
// Loop over all nodes in the current level
// Loop over all neighbors of the node
// If the neighbor hasn't been visited yet
// Add it to the global queue
}
__global__ void gpu_block_queuing_kernel(unsigned int *nodePtrs,
unsigned int *nodeNeighbors,
unsigned int *nodeVisited,
unsigned int *currLevelNodes,
unsigned int *nextLevelNodes,
unsigned int *numCurrLevelNodes,
unsigned int *numNextLevelNodes) {
// INSERT KERNEL CODE HERE
// Initialize shared memory queue
// Loop over all nodes in the current level
// Loop over all neighbors of the node
// If the neighbor hasn't been visited yet
// Add it to the block queue
// If full, add it to the global queue
// Calculate space for block queue to go into global queue
// Store block queue in global queue
}
__global__ void gpu_warp_queuing_kernel(unsigned int *nodePtrs,
unsigned int *nodeNeighbors,
unsigned int *nodeVisited,
unsigned int *currLevelNodes,
unsigned int *nextLevelNodes,
unsigned int *numCurrLevelNodes,
unsigned int *numNextLevelNodes) {
// INSERT KERNEL CODE HERE
// This version uses one queue per warp
// Initialize shared memory queue
// Loop over all nodes in the current level
// Loop over all neighbors of the node
// If the neighbor hasn't been visited yet
// Add it to the warp queue
// If full, add it to the block queue
// If full, add it to the global queue
// Calculate space for warp queue to go into block queue
// Store warp queue in block queue
// If full, add it to the global queue
// Calculate space for block queue to go into global queue
// Saturate block queue counter
// Calculate space for global queue
// Store block queue in global queue
}
/******************************************************************************
Functions
*******************************************************************************/
// DO NOT MODIFY THESE FUNCTIONS!
void gpu_global_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors,
unsigned int *nodeVisited, unsigned int *currLevelNodes,
unsigned int *nextLevelNodes,
unsigned int *numCurrLevelNodes,
unsigned int *numNextLevelNodes) {
const unsigned int numBlocks = 45;
gpu_global_queuing_kernel << <numBlocks, BLOCK_SIZE>>>
(nodePtrs, nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes,
numCurrLevelNodes, numNextLevelNodes);
}
void gpu_block_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors,
unsigned int *nodeVisited, unsigned int *currLevelNodes,
unsigned int *nextLevelNodes,
unsigned int *numCurrLevelNodes,
unsigned int *numNextLevelNodes) {
const unsigned int numBlocks = 45;
gpu_block_queuing_kernel << <numBlocks, BLOCK_SIZE>>>
(nodePtrs, nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes,
numCurrLevelNodes, numNextLevelNodes);
}
void gpu_warp_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors,
unsigned int *nodeVisited, unsigned int *currLevelNodes,
unsigned int *nextLevelNodes,
unsigned int *numCurrLevelNodes,
unsigned int *numNextLevelNodes) {
const unsigned int numBlocks = 45;
gpu_warp_queuing_kernel << <numBlocks, BLOCK_SIZE>>>
(nodePtrs, nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes,
numCurrLevelNodes, numNextLevelNodes);
}
|
85d8d9806d25039ae5c12666695bf26c7acbbbaa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include "gpukernel.h"
#include <math.h>
#define groupsize 4
#define BLOCKSIZE 256
#define FULLMASK 0xffffffff
__global__ void GPU_HelloWorld(int* a){
int i;
i = blockDim.x * blockIdx.x + threadIdx.x;
a[i] = i;
}
__global__ void GPU_ReverseWeightVolume(int numOwnedPoints, double* mp){
int i,j,k;
i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<numOwnedPoints){
mp[i] = 1.0/mp[i];
}
}
__global__ void GPU_initialize(int numOwnedPoints, double* x, double* v, double* a, double* u, double* y, double dt){
int i;
i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<numOwnedPoints){
v[i*3] = (200-50*pow(((x[i*3+2]/0.05)-1),2))*cos(atan2(x[i*3+1],x[i*3]));
v[i*3+1] = (200-50*pow(((x[i*3+2]/0.05)-1),2))*sin(atan2(x[i*3+1],x[i*3]));
v[i*3+2] = 100*(x[i*3+2]/0.05-1);
y[i*3] = y[i*3] + dt * v[i*3];
y[i*3+1] = y[i*3+1] + dt * v[i*3+1];
y[i*3+2] = y[i*3+2] + dt * v[i*3+2];
u[i*3] = dt * v[i*3];
u[i*3+1] = dt * v[i*3+1];
u[i*3+2] = dt * v[i*3+2];
}
}
void GPU_initialize_Interface(int numOwnedPoints, GParam* param){
int i;
int nthreads = numOwnedPoints+param->TotalImport;
int nblocks = (numOwnedPoints+param->TotalImport)/BLOCKSIZE+1;
hipLaunchKernelGGL(( GPU_initialize), dim3(nblocks), dim3(BLOCKSIZE), 0, 0, numOwnedPoints, param->x, param->v, param->a, param->u, param->y, param->dt);
hipDeviceSynchronize();
}
__global__ void divideNeighbor(int numOwnedPoints, int* neighborhoodlist, int* neighborPtr,int groupnum){
int i;
i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<groupnum){
int *Nptr = &neighborhoodlist[neighborPtr[i]];
int numNeigh = *Nptr;
Nptr++;
int j;
for(j=0;j<numNeigh;j++){
if(Nptr[j]>=numOwnedPoints)
break;
}
neighborhoodlist[neighborPtr[i]] |= (j<<16);
}
}
void divideNeighbor_Interface(int numOwnedPoints, GParam* param){
int nthreads = numOwnedPoints+param->TotalImport;
int nblocks = (numOwnedPoints+param->TotalImport)/BLOCKSIZE+1;
hipLaunchKernelGGL(( divideNeighbor), dim3(nblocks),dim3(BLOCKSIZE),0,0, numOwnedPoints, param->neiborlist, param->neighborPtr, numOwnedPoints/groupsize+(numOwnedPoints%groupsize!=0));
hipDeviceSynchronize();
hipLaunchKernelGGL(( GPU_ReverseWeightVolume), dim3(nblocks),dim3(BLOCKSIZE),0,0, numOwnedPoints+param->TotalImport, param->weightvolume);
hipDeviceSynchronize();
}
__global__ void DilatationKernel(double *x, double *y, double*m, double* c, double *d, int* bonddamage, int * neighborlist, int numOwnedpoints, double horizon, int* neighborPtr, int outflag){
int i,j,k;
i = blockDim.x * blockIdx.x + threadIdx.x;
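// Round numOwnedpoints up to a multiple of groupsize so every lane of the last (partial) group enters the body and joins the group shuffles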
numOwnedpoints = numOwnedpoints + (groupsize - numOwnedpoints%groupsize)*(numOwnedpoints%groupsize!=0);
if(i< numOwnedpoints){
int *Nptr = &neighborlist[neighborPtr[i/groupsize]];
int st,ed;
int numNeigh = ((*Nptr)&0x0000ffff)-outflag*(((*Nptr)&0xffff0000)>>16);
int *bond = &bonddamage[neighborPtr[i/groupsize]];
int mid = ((*Nptr)&0xffff0000)>>16;
int total = ((*Nptr)&0x0000ffff);
st = outflag*mid;
ed = mid + outflag*(total - mid);
Nptr++;
Nptr += st;
bond += st;
numNeigh = ed-st;
double ixx =x[i*3];
double ixy =x[i*3+1];
double ixz =x[i*3+2];
double iyx =y[i*3];
double iyy =y[i*3+1];
double iyz =y[i*3+2];
double di = d[i]*(outflag);
double xdx,xdy,xdz,initdist,ydx,ydy,ydz,currdist,e;
double s;
int ci = i/groupsize;
int position = i % groupsize;
int loop = numNeigh/groupsize /*+ (numNeigh%groupsize != 0)*/;
double rx[4],ry[4];
int rc[4];
int stid = (i/groupsize)*groupsize;
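// stid is the first thread index of this thread's groupsize-wide group; in the loop below each lane loads one neighbor and the loaded values are rotated through the group with __shfl_sync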
int damage;
for(j=0;j<loop;j++){
int p = Nptr[j*groupsize + position];
rx[0] = x[p*3];
rx[1] = x[p*3+1];
rx[2] = x[p*3+2];
rx[3] = c[p];
ry[0] = y[p*3];
ry[1] = y[p*3+1];
ry[2] = y[p*3+2];
ry[3] = m[p];
for(k=0;k<groupsize;k++){
//int thisp = __shlf(p, stid+(position+k)%groupsize);
damage = (bond[j*groupsize+(position+k)%groupsize]>>position)&0x00000001;
double xjx = __shfl_sync(FULLMASK,rx[0], stid+(position+k)%groupsize, 32);
double xjy = __shfl_sync(FULLMASK,rx[1], stid+(position+k)%groupsize, 32);
double xjz = __shfl_sync(FULLMASK,rx[2], stid+(position+k)%groupsize, 32);
double cj = __shfl_sync(FULLMASK,rx[3], stid+(position+k)%groupsize, 32);
double yjx = __shfl_sync(FULLMASK,ry[0], stid+(position+k)%groupsize, 32);
double yjy = __shfl_sync(FULLMASK,ry[1], stid+(position+k)%groupsize, 32);
double yjz = __shfl_sync(FULLMASK,ry[2], stid+(position+k)%groupsize, 32);
double mj = __shfl_sync(FULLMASK,ry[3], stid+(position+k)%groupsize, 32);
xdx = ixx - xjx;
xdy = ixy - xjy;
xdz = ixz - xjz;
initdist = xdx*xdx + xdy*xdy + xdz*xdz;
int flag = (initdist<=horizon*horizon);
ydx = yjx - iyx;
ydy = yjy - iyy;
ydz = yjz - iyz;
currdist = ydx*ydx + ydy*ydy + ydz*ydz;
double currinit = sqrt(initdist*currdist);
//e = currdist - initdist;
s = (currdist- 2*currinit + initdist);
int bflag = (s>0.02*0.02*initdist)*((currdist-initdist)>0);
damage = bflag | damage;
di += flag*3*(1-damage)*(currinit - initdist)*cj*m[i];
rc[(k+position)%groupsize] = damage<<position;
//atomicOr(&bond[j*groupsize+(k+position)%groupsize],damage<<position);
}
int dam = 0;
dam |= rc[position];
for(k=1;k<groupsize;k++){
dam |= __shfl_sync(FULLMASK,rc[(position+k)%groupsize], stid+(position-k+groupsize)%groupsize, 32);
}
bond[j*groupsize+position] = dam;
}
//rest = numNeigh % groupsize;
for(int n = j*groupsize; n<numNeigh; n++){
int p = Nptr[n];
xdx = ixx - x[p*3];
xdy = ixy - x[p*3+1];
xdz = ixz - x[p*3+2];
int damage = (bond[n] >> position)&0x00000001;
//int damage = 0;
initdist = xdx*xdx + xdy*xdy + xdz*xdz;
int flag = (initdist<=horizon*horizon);
ydx = y[p*3] - iyx;
ydy = y[p*3+1] - iyy;
ydz = y[p*3+2] - iyz;
currdist = ydx*ydx + ydy*ydy + ydz*ydz;
double currinit = sqrt(initdist*currdist);
//e = currdist - initdist;
s = (currdist - 2*currinit + initdist);
int bflag = (s>0.02*0.02*initdist)*((currdist-initdist)>0);
damage = bflag | damage;
//if(s > 0.02)
// *bond = 1.0;
di += flag*3*(1-damage)*(currinit - initdist )*c[p]*m[i];
atomicOr(&bond[n],damage<<position);
}
d[i] = di;
}
}
void Dkernel_Interface(GParam* param, int numOwnedPoints, double horizon, hipStream_t stream0, int flag){
int nthreads = numOwnedPoints+param->TotalImport;
int nblocks = (numOwnedPoints+param->TotalImport)/BLOCKSIZE+1;
hipLaunchKernelGGL(( DilatationKernel), dim3(nblocks),dim3(BLOCKSIZE),0,stream0, param->x, param->y, param->weightvolume, param->cellvolume, param->dilatation, param->mybondDamage, param->neiborlist, numOwnedPoints, horizon, param->neighborPtr,flag);
}
__global__ void ForceKernel(double *x, double *y, double*m, double* c, double *d, int* bonddamage, double *f, int * neighborlist, int numOwnedpoints,double bulkModulus,double shearModulus, double horizon, int* neighborPtr, double* a, double* v, double* u, double density, double dt, double* yout,int outflag){
int i,j,k;
i = blockDim.x * blockIdx.x + threadIdx.x;
numOwnedpoints = numOwnedpoints + (groupsize - numOwnedpoints%groupsize)*(numOwnedpoints%groupsize!=0);
if(i< numOwnedpoints){
int *Nptr = &neighborlist[neighborPtr[i/groupsize]];
int st,ed;
int numNeigh = ((*Nptr)&0x0000ffff)-outflag*(((*Nptr)&0xffff0000)>>16);
int *bond = &bonddamage[neighborPtr[i/groupsize]];
int mid = ((*Nptr)&0xffff0000)>>16;
int total = ((*Nptr)&0x0000ffff);
st = outflag*mid;
ed = mid + outflag*(total - mid);
Nptr++;
Nptr += st;
bond += st;
numNeigh = ed-st;
/*int *Nptr = &neighborlist[neighborPtr[i/groupsize]];
int *bond = &bonddamage[neighborPtr[i/groupsize]];
int numNeigh = ((*Nptr)&0x0000ffff);
Nptr++;*/
double K = bulkModulus;
double MU = shearModulus;
double ixx =x[i*3];
double ixy =x[i*3+1];
double ixz =x[i*3+2];
double iyx =y[i*3];
double iyy =y[i*3+1];
double iyz =y[i*3+2];
double fxi = f[i*3]*outflag;
double fyi = f[i*3+1]*outflag;
double fzi = f[i*3+2]*outflag;
double xdx,xdy,xdz,initdist,ydx,ydy,ydz,currdist,e;
int position = i % groupsize;
int ci = i/groupsize;
//int position = i % groupsize;
int loop = numNeigh/groupsize /*+ (numNeigh%groupsize != 0)*/;
double rx[4],ry[4],rd;
int rc[4];
int stid = (i/groupsize)*groupsize;
int damage;
long i1;
long magic = 0x5fe6ec85e7de30da;
double x2,q,r,z;
double thr = 1.0/3.0;
for(j=0;j<loop;j++){
int p = Nptr[j*groupsize + position];
rx[0] = x[p*3];
rx[1] = x[p*3+1];
rx[2] = x[p*3+2];
rx[3] = c[p];
ry[0] = y[p*3];
ry[1] = y[p*3+1];
ry[2] = y[p*3+2];
ry[3] = m[p];
rd = d[p];
for(k=0;k<groupsize;k++){
damage = (bond[j*groupsize+(k+position)%groupsize]>>position)&0x00000001;
double xjx = __shfl_sync(FULLMASK,rx[0], stid+(position+k)%groupsize, 32);
double xjy = __shfl_sync(FULLMASK,rx[1], stid+(position+k)%groupsize, 32);
double xjz = __shfl_sync(FULLMASK,rx[2], stid+(position+k)%groupsize, 32);
double cj = __shfl_sync(FULLMASK,rx[3], stid+(position+k)%groupsize, 32);
double yjx = __shfl_sync(FULLMASK,ry[0], stid+(position+k)%groupsize, 32);
double yjy = __shfl_sync(FULLMASK,ry[1], stid+(position+k)%groupsize, 32);
double yjz = __shfl_sync(FULLMASK,ry[2], stid+(position+k)%groupsize, 32);
double mj = __shfl_sync(FULLMASK,ry[3], stid+(position+k)%groupsize, 32);
double dj = __shfl_sync(FULLMASK,rd, stid+(position+k)%groupsize, 32);
xdx = ixx - xjx;
xdy = ixy - xjy;
xdz = ixz - xjz;
double initdist2 = xdx*xdx + xdy*xdy + xdz*xdz;
initdist = sqrt(xdx*xdx + xdy*xdy + xdz*xdz);
int flag = (initdist<=horizon);
ydx = yjx - iyx;
ydy = yjy - iyy;
ydz = yjz - iyz;
currdist = ydx*ydx + ydy*ydy + ydz*ydz;
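// Fast reciprocal square root: bit-pattern initial guess (double-precision magic constant) refined by five Newton-Raphson steps, so temp ~= 1/sqrt(currdist)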
x2 = currdist * 0.5;
i1 = *(long *)&currdist;
i1 = magic - (i1>>1);
z = *(double *)&i1;
z = z*(1.5-x2*z*z);
z = z*(1.5-x2*z*z);
z = z*(1.5-x2*z*z);
z = z*(1.5-x2*z*z);
z = z*(1.5-x2*z*z);
double temp = z;
e = (1.0 - initdist*temp)*(currdist != initdist2);
//e = 1.0 - initdist*temp;
double zeroflag = (double)(currdist == 0);
double alpha = 15.0*MU*m[i];
double alphap = 15.0*MU*mj;
double c1 = 1 * d[i] * (3.0*K*m[i] - alpha*thr);
double cp = 1 * dj * (3.0*K*mj - alphap*thr);
double t = (1-damage) * (c1* initdist*temp + (1-damage)*1*alpha*e)*flag;
double tp = (1-damage) * (cp* initdist*temp + (1-damage)*1*alphap*e)*flag;
double fx = t * ydx;
double fy = t * ydy;
double fz = t * ydz;
double fxp = tp * ydx;
double fyp = tp * ydy;
double fzp = tp * ydz;
fxi += fx *cj;
fyi += fy *cj;
fzi += fz *cj;
fxi += fxp *cj;
fyi += fyp *cj;
fzi += fzp *cj;
}
}
for(int n = j*groupsize; n<numNeigh; n++){
int p = Nptr[n];
xdx = ixx - x[p*3];
xdy = ixy - x[p*3+1];
xdz = ixz - x[p*3+2];
double initdist2 = xdx*xdx + xdy*xdy + xdz*xdz;
initdist = sqrt(xdx*xdx + xdy*xdy + xdz*xdz);
damage = (bond[n] >> position)&0x00000001;
int flag = (initdist<=horizon);
//flag = (double)(initdist <= horizon);
ydx = y[p*3] - iyx;
ydy = y[p*3+1] - iyy;
ydz = y[p*3+2] - iyz;
currdist = ydx*ydx + ydy*ydy + ydz*ydz;
double zeroflag = (double)(currdist == 0);
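// Same fast reciprocal square root trick as in the grouped loop above: temp ~= 1/sqrt(currdist)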
x2 = currdist * 0.5;
i1 = *(long *)&currdist;
i1 = magic - (i1>>1);
z = *(double *)&i1;
z = z*(1.5-x2*z*z);
z = z*(1.5-x2*z*z);
z = z*(1.5-x2*z*z);
z = z*(1.5-x2*z*z);
z = z*(1.5-x2*z*z);
double temp = z;
e = (1.0 - initdist*temp)*(currdist != initdist2);
//zeroflag = 1.0e-16;
double alpha = 15.0*MU*m[i];
double alphap = 15.0*MU*m[p];
double c1 = 1 * d[i] * (3.0*K*m[i] - alpha*thr);
double cp = 1 * d[p] * (3.0*K*m[p] - alphap*thr);
double t = (1-damage) * (c1* initdist*temp + (1-damage)*1*alpha*e)*flag;
double tp = (1-damage) * (cp* initdist*temp + (1-damage)*1*alphap*e)*flag;
double fx = t * ydx;
double fy = t * ydy;
double fz = t * ydz;
double fxp = tp * ydx;
double fyp = tp * ydy;
double fzp = tp * ydz;
fxi += fx *c[p];
fyi += fy *c[p];
fzi += fz *c[p];
fxi += fxp *c[p];
fyi += fyp *c[p];
fzi += fzp *c[p];
}
f[i*3] = fxi;
f[i*3+1] = fyi;
f[i*3+2] = fzi;
double a1,a2,a3,v1,v2,v3;
a1 = fxi/density;
a2 = fyi/density;
a3 = fzi/density;
a[i*3] = a1;
a[i*3+1] = a2;
a[i*3+2] = a3;
v1 = v[i*3]+(a1*dt*0.5+a1*dt*0.5)*outflag;
v2 = v[i*3+1]+(a2*dt*0.5+ a2*dt*0.5)*outflag;
v3 = v[i*3+2]+(a3*dt*0.5 + a3*dt*0.5)*outflag;
//v2 += a2*dt*0.5;
//v3 += a2*dt*0.5;
v[i*3] = v1;
v[i*3+1]= v2;
v[i*3+2] = v3;
u[i*3] += dt*v1*outflag;
u[i*3+1] += dt*v2*outflag;
u[i*3+2] += dt*v3*outflag;
yout[i*3] = iyx+dt*v1*outflag;
yout[i*3+1] = iyy+dt*v2*outflag;
yout[i*3+2] = iyz+dt*v3*outflag;
}
}
void Fkernel_Interface(GParam* param, int numOwnedPoints, double horizon, double bulkModulus, double shearModulus, hipStream_t stream0, int flag){
int nthreads = numOwnedPoints+param->TotalImport;
int nblocks = (numOwnedPoints+param->TotalImport)/BLOCKSIZE+1;
hipLaunchKernelGGL(( ForceKernel), dim3(nblocks),dim3(BLOCKSIZE),0,stream0, param->x, param->y, param->weightvolume, param->cellvolume, param->dilatation, param->mybondDamage, param->force, param->neiborlist, numOwnedPoints, bulkModulus,shearModulus,horizon,param->neighborPtr, param->a, param->v, param->u, param->density,param->dt,param->yout,flag);
}
| 85d8d9806d25039ae5c12666695bf26c7acbbbaa.cu | #include "cuda_runtime.h"
#include <iostream>
#include <stdio.h>
#include "gpukernel.h"
#include <math.h>
#define groupsize 4
#define BLOCKSIZE 256
#define FULLMASK 0xffffffff
__global__ void GPU_HelloWorld(int* a){
int i;
i = blockDim.x * blockIdx.x + threadIdx.x;
a[i] = i;
}
__global__ void GPU_ReverseWeightVolume(int numOwnedPoints, double* mp){
int i,j,k;
i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<numOwnedPoints){
mp[i] = 1.0/mp[i];
}
}
__global__ void GPU_initialize(int numOwnedPoints, double* x, double* v, double* a, double* u, double* y, double dt){
int i;
i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<numOwnedPoints){
v[i*3] = (200-50*pow(((x[i*3+2]/0.05)-1),2))*cos(atan2(x[i*3+1],x[i*3]));
v[i*3+1] = (200-50*pow(((x[i*3+2]/0.05)-1),2))*sin(atan2(x[i*3+1],x[i*3]));
v[i*3+2] = 100*(x[i*3+2]/0.05-1);
y[i*3] = y[i*3] + dt * v[i*3];
y[i*3+1] = y[i*3+1] + dt * v[i*3+1];
y[i*3+2] = y[i*3+2] + dt * v[i*3+2];
u[i*3] = dt * v[i*3];
u[i*3+1] = dt * v[i*3+1];
u[i*3+2] = dt * v[i*3+2];
}
}
void GPU_initialize_Interface(int numOwnedPoints, GParam* param){
int i;
int nthreads = numOwnedPoints+param->TotalImport;
int nblocks = (numOwnedPoints+param->TotalImport)/BLOCKSIZE+1;
GPU_initialize<<<dim3(nblocks), dim3(BLOCKSIZE)>>>(numOwnedPoints, param->x, param->v, param->a, param->u, param->y, param->dt);
cudaDeviceSynchronize();
}
__global__ void divideNeighbor(int numOwnedPoints, int* neighborhoodlist, int* neighborPtr,int groupnum){
int i;
i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<groupnum){
int *Nptr = &neighborhoodlist[neighborPtr[i]];
int numNeigh = *Nptr;
Nptr++;
int j;
for(j=0;j<numNeigh;j++){
if(Nptr[j]>=numOwnedPoints)
break;
}
neighborhoodlist[neighborPtr[i]] |= (j<<16);
}
}
void divideNeighbor_Interface(int numOwnedPoints, GParam* param){
int nthreads = numOwnedPoints+param->TotalImport;
int nblocks = (numOwnedPoints+param->TotalImport)/BLOCKSIZE+1;
divideNeighbor<<<nblocks,BLOCKSIZE,0,0>>>(numOwnedPoints, param->neiborlist, param->neighborPtr, numOwnedPoints/groupsize+(numOwnedPoints%groupsize!=0));
cudaDeviceSynchronize();
GPU_ReverseWeightVolume<<<nblocks,BLOCKSIZE,0,0>>>(numOwnedPoints+param->TotalImport, param->weightvolume);
cudaDeviceSynchronize();
}
__global__ void DilatationKernel(double *x, double *y, double*m, double* c, double *d, int* bonddamage, int * neighborlist, int numOwnedpoints, double horizon, int* neighborPtr, int outflag){
int i,j,k;
i = blockDim.x * blockIdx.x + threadIdx.x;
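// Round numOwnedpoints up to a multiple of groupsize so every lane of the last (partial) group enters the body and joins the group shuffles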
numOwnedpoints = numOwnedpoints + (groupsize - numOwnedpoints%groupsize)*(numOwnedpoints%groupsize!=0);
if(i< numOwnedpoints){
int *Nptr = &neighborlist[neighborPtr[i/groupsize]];
int st,ed;
int numNeigh = ((*Nptr)&0x0000ffff)-outflag*(((*Nptr)&0xffff0000)>>16);
int *bond = &bonddamage[neighborPtr[i/groupsize]];
int mid = ((*Nptr)&0xffff0000)>>16;
int total = ((*Nptr)&0x0000ffff);
st = outflag*mid;
ed = mid + outflag*(total - mid);
Nptr++;
Nptr += st;
bond += st;
numNeigh = ed-st;
double ixx =x[i*3];
double ixy =x[i*3+1];
double ixz =x[i*3+2];
double iyx =y[i*3];
double iyy =y[i*3+1];
double iyz =y[i*3+2];
double di = d[i]*(outflag);
double xdx,xdy,xdz,initdist,ydx,ydy,ydz,currdist,e;
double s;
int ci = i/groupsize;
int position = i % groupsize;
int loop = numNeigh/groupsize /*+ (numNeigh%groupsize != 0)*/;
double rx[4],ry[4];
int rc[4];
int stid = (i/groupsize)*groupsize;
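// stid is the first thread index of this thread's groupsize-wide group; in the loop below each lane loads one neighbor and the loaded values are rotated through the group with __shfl_sync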
int damage;
for(j=0;j<loop;j++){
int p = Nptr[j*groupsize + position];
rx[0] = x[p*3];
rx[1] = x[p*3+1];
rx[2] = x[p*3+2];
rx[3] = c[p];
ry[0] = y[p*3];
ry[1] = y[p*3+1];
ry[2] = y[p*3+2];
ry[3] = m[p];
for(k=0;k<groupsize;k++){
//int thisp = __shlf(p, stid+(position+k)%groupsize);
damage = (bond[j*groupsize+(position+k)%groupsize]>>position)&0x00000001;
double xjx = __shfl_sync(FULLMASK,rx[0], stid+(position+k)%groupsize, 32);
double xjy = __shfl_sync(FULLMASK,rx[1], stid+(position+k)%groupsize, 32);
double xjz = __shfl_sync(FULLMASK,rx[2], stid+(position+k)%groupsize, 32);
double cj = __shfl_sync(FULLMASK,rx[3], stid+(position+k)%groupsize, 32);
double yjx = __shfl_sync(FULLMASK,ry[0], stid+(position+k)%groupsize, 32);
double yjy = __shfl_sync(FULLMASK,ry[1], stid+(position+k)%groupsize, 32);
double yjz = __shfl_sync(FULLMASK,ry[2], stid+(position+k)%groupsize, 32);
double mj = __shfl_sync(FULLMASK,ry[3], stid+(position+k)%groupsize, 32);
xdx = ixx - xjx;
xdy = ixy - xjy;
xdz = ixz - xjz;
initdist = xdx*xdx + xdy*xdy + xdz*xdz;
int flag = (initdist<=horizon*horizon);
ydx = yjx - iyx;
ydy = yjy - iyy;
ydz = yjz - iyz;
currdist = ydx*ydx + ydy*ydy + ydz*ydz;
double currinit = sqrt(initdist*currdist);
//e = currdist - initdist;
s = (currdist- 2*currinit + initdist);
int bflag = (s>0.02*0.02*initdist)*((currdist-initdist)>0);
damage = bflag | damage;
di += flag*3*(1-damage)*(currinit - initdist)*cj*m[i];
rc[(k+position)%groupsize] = damage<<position;
//atomicOr(&bond[j*groupsize+(k+position)%groupsize],damage<<position);
}
int dam = 0;
dam |= rc[position];
for(k=1;k<groupsize;k++){
dam |= __shfl_sync(FULLMASK,rc[(position+k)%groupsize], stid+(position-k+groupsize)%groupsize, 32);
}
bond[j*groupsize+position] = dam;
}
//rest = numNeigh % groupsize;
for(int n = j*groupsize; n<numNeigh; n++){
int p = Nptr[n];
xdx = ixx - x[p*3];
xdy = ixy - x[p*3+1];
xdz = ixz - x[p*3+2];
int damage = (bond[n] >> position)&0x00000001;
//int damage = 0;
initdist = xdx*xdx + xdy*xdy + xdz*xdz;
int flag = (initdist<=horizon*horizon);
ydx = y[p*3] - iyx;
ydy = y[p*3+1] - iyy;
ydz = y[p*3+2] - iyz;
currdist = ydx*ydx + ydy*ydy + ydz*ydz;
double currinit = sqrt(initdist*currdist);
//e = currdist - initdist;
s = (currdist - 2*currinit + initdist);
int bflag = (s>0.02*0.02*initdist)*((currdist-initdist)>0);
damage = bflag | damage;
//if(s > 0.02)
// *bond = 1.0;
di += flag*3*(1-damage)*(currinit - initdist )*c[p]*m[i];
atomicOr(&bond[n],damage<<position);
}
d[i] = di;
}
}
void Dkernel_Interface(GParam* param, int numOwnedPoints, double horizon, cudaStream_t stream0, int flag){
int nthreads = numOwnedPoints+param->TotalImport;
int nblocks = (numOwnedPoints+param->TotalImport)/BLOCKSIZE+1;
DilatationKernel<<<nblocks,BLOCKSIZE,0,stream0>>>(param->x, param->y, param->weightvolume, param->cellvolume, param->dilatation, param->mybondDamage, param->neiborlist, numOwnedPoints, horizon, param->neighborPtr,flag);
}
__global__ void ForceKernel(double *x, double *y, double*m, double* c, double *d, int* bonddamage, double *f, int * neighborlist, int numOwnedpoints,double bulkModulus,double shearModulus, double horizon, int* neighborPtr, double* a, double* v, double* u, double density, double dt, double* yout,int outflag){
int i,j,k;
i = blockDim.x * blockIdx.x + threadIdx.x;
numOwnedpoints = numOwnedpoints + (groupsize - numOwnedpoints%groupsize)*(numOwnedpoints%groupsize!=0);
if(i< numOwnedpoints){
int *Nptr = &neighborlist[neighborPtr[i/groupsize]];
int st,ed;
int numNeigh = ((*Nptr)&0x0000ffff)-outflag*(((*Nptr)&0xffff0000)>>16);
int *bond = &bonddamage[neighborPtr[i/groupsize]];
int mid = ((*Nptr)&0xffff0000)>>16;
int total = ((*Nptr)&0x0000ffff);
st = outflag*mid;
ed = mid + outflag*(total - mid);
Nptr++;
Nptr += st;
bond += st;
numNeigh = ed-st;
/*int *Nptr = &neighborlist[neighborPtr[i/groupsize]];
int *bond = &bonddamage[neighborPtr[i/groupsize]];
int numNeigh = ((*Nptr)&0x0000ffff);
Nptr++;*/
double K = bulkModulus;
double MU = shearModulus;
double ixx =x[i*3];
double ixy =x[i*3+1];
double ixz =x[i*3+2];
double iyx =y[i*3];
double iyy =y[i*3+1];
double iyz =y[i*3+2];
double fxi = f[i*3]*outflag;
double fyi = f[i*3+1]*outflag;
double fzi = f[i*3+2]*outflag;
double xdx,xdy,xdz,initdist,ydx,ydy,ydz,currdist,e;
int position = i % groupsize;
int ci = i/groupsize;
//int position = i % groupsize;
int loop = numNeigh/groupsize /*+ (numNeigh%groupsize != 0)*/;
double rx[4],ry[4],rd;
int rc[4];
int stid = (i/groupsize)*groupsize;
int damage;
long i1;
long magic = 0x5fe6ec85e7de30da;
double x2,q,r,z;
double thr = 1.0/3.0;
for(j=0;j<loop;j++){
int p = Nptr[j*groupsize + position];
rx[0] = x[p*3];
rx[1] = x[p*3+1];
rx[2] = x[p*3+2];
rx[3] = c[p];
ry[0] = y[p*3];
ry[1] = y[p*3+1];
ry[2] = y[p*3+2];
ry[3] = m[p];
rd = d[p];
for(k=0;k<groupsize;k++){
damage = (bond[j*groupsize+(k+position)%groupsize]>>position)&0x00000001;
double xjx = __shfl_sync(FULLMASK,rx[0], stid+(position+k)%groupsize, 32);
double xjy = __shfl_sync(FULLMASK,rx[1], stid+(position+k)%groupsize, 32);
double xjz = __shfl_sync(FULLMASK,rx[2], stid+(position+k)%groupsize, 32);
double cj = __shfl_sync(FULLMASK,rx[3], stid+(position+k)%groupsize, 32);
double yjx = __shfl_sync(FULLMASK,ry[0], stid+(position+k)%groupsize, 32);
double yjy = __shfl_sync(FULLMASK,ry[1], stid+(position+k)%groupsize, 32);
double yjz = __shfl_sync(FULLMASK,ry[2], stid+(position+k)%groupsize, 32);
double mj = __shfl_sync(FULLMASK,ry[3], stid+(position+k)%groupsize, 32);
double dj = __shfl_sync(FULLMASK,rd, stid+(position+k)%groupsize, 32);
xdx = ixx - xjx;
xdy = ixy - xjy;
xdz = ixz - xjz;
double initdist2 = xdx*xdx + xdy*xdy + xdz*xdz;
initdist = sqrt(xdx*xdx + xdy*xdy + xdz*xdz);
int flag = (initdist<=horizon);
ydx = yjx - iyx;
ydy = yjy - iyy;
ydz = yjz - iyz;
currdist = ydx*ydx + ydy*ydy + ydz*ydz;
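// Fast reciprocal square root: bit-pattern initial guess (double-precision magic constant) refined by five Newton-Raphson steps, so temp ~= 1/sqrt(currdist)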
x2 = currdist * 0.5;
i1 = *(long *)&currdist;
i1 = magic - (i1>>1);
z = *(double *)&i1;
z = z*(1.5-x2*z*z);
z = z*(1.5-x2*z*z);
z = z*(1.5-x2*z*z);
z = z*(1.5-x2*z*z);
z = z*(1.5-x2*z*z);
double temp = z;
e = (1.0 - initdist*temp)*(currdist != initdist2);
//e = 1.0 - initdist*temp;
double zeroflag = (double)(currdist == 0);
double alpha = 15.0*MU*m[i];
double alphap = 15.0*MU*mj;
double c1 = 1 * d[i] * (3.0*K*m[i] - alpha*thr);
double cp = 1 * dj * (3.0*K*mj - alphap*thr);
double t = (1-damage) * (c1* initdist*temp + (1-damage)*1*alpha*e)*flag;
double tp = (1-damage) * (cp* initdist*temp + (1-damage)*1*alphap*e)*flag;
double fx = t * ydx;
double fy = t * ydy;
double fz = t * ydz;
double fxp = tp * ydx;
double fyp = tp * ydy;
double fzp = tp * ydz;
fxi += fx *cj;
fyi += fy *cj;
fzi += fz *cj;
fxi += fxp *cj;
fyi += fyp *cj;
fzi += fzp *cj;
}
}
for(int n = j*groupsize; n<numNeigh; n++){
int p = Nptr[n];
xdx = ixx - x[p*3];
xdy = ixy - x[p*3+1];
xdz = ixz - x[p*3+2];
double initdist2 = xdx*xdx + xdy*xdy + xdz*xdz;
initdist = sqrt(xdx*xdx + xdy*xdy + xdz*xdz);
damage = (bond[n] >> position)&0x00000001;
int flag = (initdist<=horizon);
//flag = (double)(initdist <= horizon);
ydx = y[p*3] - iyx;
ydy = y[p*3+1] - iyy;
ydz = y[p*3+2] - iyz;
currdist = ydx*ydx + ydy*ydy + ydz*ydz;
double zeroflag = (double)(currdist == 0);
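// Same fast reciprocal square root trick as in the grouped loop above: temp ~= 1/sqrt(currdist)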
x2 = currdist * 0.5;
i1 = *(long *)&currdist;
i1 = magic - (i1>>1);
z = *(double *)&i1;
z = z*(1.5-x2*z*z);
z = z*(1.5-x2*z*z);
z = z*(1.5-x2*z*z);
z = z*(1.5-x2*z*z);
z = z*(1.5-x2*z*z);
double temp = z;
e = (1.0 - initdist*temp)*(currdist != initdist2);
//zeroflag = 1.0e-16;
double alpha = 15.0*MU*m[i];
double alphap = 15.0*MU*m[p];
double c1 = 1 * d[i] * (3.0*K*m[i] - alpha*thr);
double cp = 1 * d[p] * (3.0*K*m[p] - alphap*thr);
double t = (1-damage) * (c1* initdist*temp + (1-damage)*1*alpha*e)*flag;
double tp = (1-damage) * (cp* initdist*temp + (1-damage)*1*alphap*e)*flag;
double fx = t * ydx;
double fy = t * ydy;
double fz = t * ydz;
double fxp = tp * ydx;
double fyp = tp * ydy;
double fzp = tp * ydz;
fxi += fx *c[p];
fyi += fy *c[p];
fzi += fz *c[p];
fxi += fxp *c[p];
fyi += fyp *c[p];
fzi += fzp *c[p];
}
f[i*3] = fxi;
f[i*3+1] = fyi;
f[i*3+2] = fzi;
double a1,a2,a3,v1,v2,v3;
a1 = fxi/density;
a2 = fyi/density;
a3 = fzi/density;
a[i*3] = a1;
a[i*3+1] = a2;
a[i*3+2] = a3;
v1 = v[i*3]+(a1*dt*0.5+a1*dt*0.5)*outflag;
v2 = v[i*3+1]+(a2*dt*0.5+ a2*dt*0.5)*outflag;
v3 = v[i*3+2]+(a3*dt*0.5 + a3*dt*0.5)*outflag;
//v2 += a2*dt*0.5;
//v3 += a2*dt*0.5;
v[i*3] = v1;
v[i*3+1]= v2;
v[i*3+2] = v3;
u[i*3] += dt*v1*outflag;
u[i*3+1] += dt*v2*outflag;
u[i*3+2] += dt*v3*outflag;
yout[i*3] = iyx+dt*v1*outflag;
yout[i*3+1] = iyy+dt*v2*outflag;
yout[i*3+2] = iyz+dt*v3*outflag;
}
}
void Fkernel_Interface(GParam* param, int numOwnedPoints, double horizon, double bulkModulus, double shearModulus, cudaStream_t stream0, int flag){
int nthreads = numOwnedPoints+param->TotalImport;
int nblocks = (numOwnedPoints+param->TotalImport)/BLOCKSIZE+1;
ForceKernel<<<nblocks,BLOCKSIZE,0,stream0>>>(param->x, param->y, param->weightvolume, param->cellvolume, param->dilatation, param->mybondDamage, param->force, param->neiborlist, numOwnedPoints, bulkModulus,shearModulus,horizon,param->neighborPtr, param->a, param->v, param->u, param->density,param->dt,param->yout,flag);
}
|
0f848adac7c69c7793ac944f4e7bffa7b42621cb.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace arithm
{
template <class Op, typename T>
struct Cmp : binary_function<T, T, uchar>
{
__device__ __forceinline__ uchar operator()(T a, T b) const
{
Op op;
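// op() returns a bool; negating it maps true to 0xFF, giving the 255/0 mask that cv::compare expects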
return -op(a, b);
}
};
#define TYPE_VEC(type, cn) typename TypeVec<type, cn>::vec_type
template <class Op, typename T, int cn> struct CmpScalar;
template <class Op, typename T>
struct CmpScalar<Op, T, 1> : unary_function<T, uchar>
{
T val;
__host__ explicit CmpScalar(T val_) : val(val_) {}
__device__ __forceinline__ uchar operator()(T src) const
{
Cmp<Op, T> op;
return op(src, val);
}
};
template <class Op, typename T>
struct CmpScalar<Op, T, 2> : unary_function<TYPE_VEC(T, 2), TYPE_VEC(uchar, 2)>
{
TYPE_VEC(T, 2) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 2) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 2) operator()(const TYPE_VEC(T, 2) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 2)>::make(op(src.x, val.x), op(src.y, val.y));
}
};
template <class Op, typename T>
struct CmpScalar<Op, T, 3> : unary_function<TYPE_VEC(T, 3), TYPE_VEC(uchar, 3)>
{
TYPE_VEC(T, 3) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 3) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 3) operator()(const TYPE_VEC(T, 3) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 3)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z));
}
};
template <class Op, typename T>
struct CmpScalar<Op, T, 4> : unary_function<TYPE_VEC(T, 4), TYPE_VEC(uchar, 4)>
{
TYPE_VEC(T, 4) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 4) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 4) operator()(const TYPE_VEC(T, 4) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 4)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z), op(src.w, val.w));
}
};
#undef TYPE_VEC
}
namespace cv { namespace gpu { namespace cudev
{
template <class Op, typename T> struct TransformFunctorTraits< arithm::CmpScalar<Op, T, 1> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(uchar)>
{
};
}}}
namespace arithm
{
template <template <typename> class Op, typename T, int cn>
void cmpScalar(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef typename TypeVec<T, cn>::vec_type src_t;
typedef typename TypeVec<uchar, cn>::vec_type dst_t;
T sval[] = {static_cast<T>(val[0]), static_cast<T>(val[1]), static_cast<T>(val[2]), static_cast<T>(val[3])};
src_t val1 = VecTraits<src_t>::make(sval);
CmpScalar<Op<T>, T, cn> op(val1);
cudev::transform((PtrStepSz<src_t>) src, (PtrStepSz<dst_t>) dst, op, WithOutMask(), stream);
}
template <typename T> void cmpScalarEq(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<equal_to, T, 1>,
cmpScalar<equal_to, T, 2>,
cmpScalar<equal_to, T, 3>,
cmpScalar<equal_to, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarNe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<not_equal_to, T, 1>,
cmpScalar<not_equal_to, T, 2>,
cmpScalar<not_equal_to, T, 3>,
cmpScalar<not_equal_to, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarLt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<less, T, 1>,
cmpScalar<less, T, 2>,
cmpScalar<less, T, 3>,
cmpScalar<less, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarLe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<less_equal, T, 1>,
cmpScalar<less_equal, T, 2>,
cmpScalar<less_equal, T, 3>,
cmpScalar<less_equal, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarGt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<greater, T, 1>,
cmpScalar<greater, T, 2>,
cmpScalar<greater, T, 3>,
cmpScalar<greater, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarGe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<greater_equal, T, 1>,
cmpScalar<greater_equal, T, 2>,
cmpScalar<greater_equal, T, 3>,
cmpScalar<greater_equal, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template void cmpScalarEq<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarEq<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarEq<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarEq<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarEq<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarEq<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarEq<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarNe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarNe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarNe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarNe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarNe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarNe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarNe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLt<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLt<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLt<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLt<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLt<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLt<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLt<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGt<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGt<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGt<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGt<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGt<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGt<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGt<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
}
#endif // CUDA_DISABLER
| 0f848adac7c69c7793ac944f4e7bffa7b42621cb.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace arithm
{
template <class Op, typename T>
struct Cmp : binary_function<T, T, uchar>
{
__device__ __forceinline__ uchar operator()(T a, T b) const
{
Op op;
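// op() returns a bool; negating it maps true to 0xFF, giving the 255/0 mask that cv::compare expects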
return -op(a, b);
}
};
#define TYPE_VEC(type, cn) typename TypeVec<type, cn>::vec_type
template <class Op, typename T, int cn> struct CmpScalar;
template <class Op, typename T>
struct CmpScalar<Op, T, 1> : unary_function<T, uchar>
{
T val;
__host__ explicit CmpScalar(T val_) : val(val_) {}
__device__ __forceinline__ uchar operator()(T src) const
{
Cmp<Op, T> op;
return op(src, val);
}
};
template <class Op, typename T>
struct CmpScalar<Op, T, 2> : unary_function<TYPE_VEC(T, 2), TYPE_VEC(uchar, 2)>
{
TYPE_VEC(T, 2) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 2) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 2) operator()(const TYPE_VEC(T, 2) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 2)>::make(op(src.x, val.x), op(src.y, val.y));
}
};
template <class Op, typename T>
struct CmpScalar<Op, T, 3> : unary_function<TYPE_VEC(T, 3), TYPE_VEC(uchar, 3)>
{
TYPE_VEC(T, 3) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 3) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 3) operator()(const TYPE_VEC(T, 3) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 3)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z));
}
};
template <class Op, typename T>
struct CmpScalar<Op, T, 4> : unary_function<TYPE_VEC(T, 4), TYPE_VEC(uchar, 4)>
{
TYPE_VEC(T, 4) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 4) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 4) operator()(const TYPE_VEC(T, 4) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 4)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z), op(src.w, val.w));
}
};
#undef TYPE_VEC
}
namespace cv { namespace gpu { namespace cudev
{
template <class Op, typename T> struct TransformFunctorTraits< arithm::CmpScalar<Op, T, 1> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(uchar)>
{
};
}}}
namespace arithm
{
template <template <typename> class Op, typename T, int cn>
void cmpScalar(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef typename TypeVec<T, cn>::vec_type src_t;
typedef typename TypeVec<uchar, cn>::vec_type dst_t;
T sval[] = {static_cast<T>(val[0]), static_cast<T>(val[1]), static_cast<T>(val[2]), static_cast<T>(val[3])};
src_t val1 = VecTraits<src_t>::make(sval);
CmpScalar<Op<T>, T, cn> op(val1);
cudev::transform((PtrStepSz<src_t>) src, (PtrStepSz<dst_t>) dst, op, WithOutMask(), stream);
}
template <typename T> void cmpScalarEq(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<equal_to, T, 1>,
cmpScalar<equal_to, T, 2>,
cmpScalar<equal_to, T, 3>,
cmpScalar<equal_to, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarNe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<not_equal_to, T, 1>,
cmpScalar<not_equal_to, T, 2>,
cmpScalar<not_equal_to, T, 3>,
cmpScalar<not_equal_to, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarLt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<less, T, 1>,
cmpScalar<less, T, 2>,
cmpScalar<less, T, 3>,
cmpScalar<less, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarLe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<less_equal, T, 1>,
cmpScalar<less_equal, T, 2>,
cmpScalar<less_equal, T, 3>,
cmpScalar<less_equal, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarGt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<greater, T, 1>,
cmpScalar<greater, T, 2>,
cmpScalar<greater, T, 3>,
cmpScalar<greater, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarGe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<greater_equal, T, 1>,
cmpScalar<greater_equal, T, 2>,
cmpScalar<greater_equal, T, 3>,
cmpScalar<greater_equal, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template void cmpScalarEq<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
}
#endif // CUDA_DISABLER
|
37a93d3fff2fc70f1d4f38cef2bb78765e9c47b6.hip | // !!! This is a file automatically generated by hipify!!!
#include <getopt.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
extern "C" {
#include "libs/bitmap.h"
}
#define ERROR_EXIT -1
#define cudaErrorCheck(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line,
bool abort = true) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %s %d\n", hipGetErrorName(code),
hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
// Convolutional Filter Examples, each with dimension 3,
// gaussian filter with dimension 5
// If you apply another filter, remember not only to exchange
// the filter but also the filterFactor and the correct dimension.
// int const sobelYFilter[] = {-1, -2, -1, 0, 0, 0, 1, 2, 1};
// float const sobelYFilterFactor = (float)1.0;
//
// int const sobelXFilter[] = {-1, 0, 1, -2, 0, 2, -1, 0, 1};
// float const sobelXFilterFactor = (float)1.0;
int const laplacian1Filter[] = {-1, -4, -1, -4, 20, -4, -1, -4, -1};
float const laplacian1FilterFactor = (float)1.0;
// int const laplacian2Filter[] = {0, 1, 0, 1, -4, 1, 0, 1, 0};
// float const laplacian2FilterFactor = (float)1.0;
//
// int const laplacian3Filter[] = {-1, -1, -1, -1, 8, -1, -1, -1, -1};
// float const laplacian3FilterFactor = (float)1.0;
//
//// Bonus Filter:
//
// int const gaussianFilter[] = {1, 4, 6, 4, 1, 4, 16, 24, 16, 4, 6, 24, 36,
// 24, 6, 4, 16, 24, 16, 4, 1, 4, 6, 4, 1};
//
// float const gaussianFilterFactor = (float)1.0 / 256.0;
// Apply convolutional filter on image data
void applyFilter(unsigned char **out, unsigned char **in, unsigned int width,
unsigned int height, int *filter, unsigned int filterDim,
float filterFactor) {
unsigned int const filterCenter = (filterDim / 2);
for (unsigned int y = 0; y < height; y++) {
for (unsigned int x = 0; x < width; x++) {
int aggregate = 0;
for (unsigned int ky = 0; ky < filterDim; ky++) {
int nky = filterDim - 1 - ky;
for (unsigned int kx = 0; kx < filterDim; kx++) {
int nkx = filterDim - 1 - kx;
int yy = y + (ky - filterCenter);
int xx = x + (kx - filterCenter);
if (xx >= 0 && xx < (int)width && yy >= 0 && yy < (int)height)
aggregate += in[yy][xx] * filter[nky * filterDim + nkx];
}
}
aggregate *= filterFactor;
if (aggregate > 0) {
out[y][x] = (aggregate > 255) ? 255 : aggregate;
} else {
out[y][x] = 0;
}
}
}
}
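// Worked example: with the laplacian1Filter above, a pixel whose 3x3
// neighbourhood is a constant value v accumulates
//   20*v - 4*(4*v) - 1*(4*v) = 0,
// so flat regions map to 0 and only intensity changes survive the clamping
// to [0, 255] done in applyFilter.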
void help(char const *exec, char const opt, char const *optarg) {
FILE *out = stdout;
if (opt != 0) {
out = stderr;
if (optarg) {
fprintf(out, "Invalid parameter - %c %s\n", opt, optarg);
} else {
fprintf(out, "Invalid parameter - %c\n", opt);
}
}
fprintf(out, "%s [options] <input-bmp> <output-bmp>\n", exec);
fprintf(out, "\n");
fprintf(out, "Options:\n");
fprintf(out, " -i, --iterations <iterations> number of iterations (1)\n");
fprintf(out, "\n");
fprintf(out, "Example: %s in.bmp out.bmp -i 10000\n", exec);
}
int main(int argc, char **argv) {
/*
Parameter parsing, don't change this!
*/
unsigned int iterations = 1;
char *output = NULL;
char *input = NULL;
int ret = 0;
static struct option const long_options[] = {
{"help", no_argument, 0, 'h'},
{"iterations", required_argument, 0, 'i'},
{0, 0, 0, 0}};
static char const *short_options = "hi:";
{
char *endptr;
int c;
int option_index = 0;
while ((c = getopt_long(argc, argv, short_options, long_options,
&option_index)) != -1) {
switch (c) {
case 'h':
help(argv[0], 0, NULL);
return 0;
case 'i':
iterations = strtol(optarg, &endptr, 10);
if (endptr == optarg) {
help(argv[0], c, optarg);
return ERROR_EXIT;
}
break;
default:
abort();
}
}
}
if (argc <= (optind + 1)) {
    help(argv[0], ' ', "Not enough arguments");
return ERROR_EXIT;
}
input = (char *)calloc(strlen(argv[optind]) + 1, sizeof(char));
strncpy(input, argv[optind], strlen(argv[optind]));
optind++;
output = (char *)calloc(strlen(argv[optind]) + 1, sizeof(char));
strncpy(output, argv[optind], strlen(argv[optind]));
optind++;
/*
End of Parameter parsing!
*/
/*
Create the BMP image and load it from disk.
*/
bmpImage *image = newBmpImage(0, 0);
  if (image == NULL) {
    fprintf(stderr, "Could not allocate new image!\n");
    return ERROR_EXIT;
  }
if (loadBmpImage(image, input) != 0) {
fprintf(stderr, "Could not load bmp image '%s'!\n", input);
freeBmpImage(image);
return ERROR_EXIT;
}
// Create a single color channel image. It is easier to work just with one
// color
bmpImageChannel *imageChannel =
newBmpImageChannel(image->width, image->height);
if (imageChannel == NULL) {
fprintf(stderr, "Could not allocate new image channel!\n");
freeBmpImage(image);
return ERROR_EXIT;
}
// Extract from the loaded image an average over all colors - nothing else
  // than a black and white representation. extractImageChannel and
// mapImageChannel need the images to be in the exact same dimensions! Other
// prepared extraction functions are extractRed, extractGreen, extractBlue
if (extractImageChannel(imageChannel, image, extractAverage) != 0) {
fprintf(stderr, "Could not extract image channel!\n");
freeBmpImage(image);
freeBmpImageChannel(imageChannel);
return ERROR_EXIT;
}
// Here we do the actual computation!
// imageChannel->data is a 2-dimensional array of unsigned char which is
// accessed row first ([y][x])
bmpImageChannel *processImageChannel =
newBmpImageChannel(imageChannel->width, imageChannel->height);
for (unsigned int i = 0; i < iterations; i++) {
applyFilter(
processImageChannel->data, imageChannel->data, imageChannel->width,
imageChannel->height, (int *)laplacian1Filter, 3, laplacian1FilterFactor
// (int *)laplacian2Filter, 3, laplacian2FilterFactor
// (int *)laplacian3Filter, 3, laplacian3FilterFactor
//(int *)gaussianFilter, 5, gaussianFilterFactor
);
// Swap the data pointers
unsigned char **tmp = processImageChannel->data;
processImageChannel->data = imageChannel->data;
imageChannel->data = tmp;
unsigned char *tmp_raw = processImageChannel->rawdata;
processImageChannel->rawdata = imageChannel->rawdata;
imageChannel->rawdata = tmp_raw;
}
freeBmpImageChannel(processImageChannel);
// Map our single color image back to a normal BMP image with 3 color channels
// mapEqual puts the color value on all three channels the same way
// other mapping functions are mapRed, mapGreen, mapBlue
if (mapImageChannel(image, imageChannel, mapEqual) != 0) {
fprintf(stderr, "Could not map image channel!\n");
freeBmpImage(image);
freeBmpImageChannel(imageChannel);
return ERROR_EXIT;
}
freeBmpImageChannel(imageChannel);
// Write the image back to disk
if (saveBmpImage(image, output) != 0) {
fprintf(stderr, "Could not save output to '%s'!\n", output);
freeBmpImage(image);
return ERROR_EXIT;
};
ret = 0;
if (input)
free(input);
if (output)
free(output);
return ret;
};
| 37a93d3fff2fc70f1d4f38cef2bb78765e9c47b6.cu | #include <getopt.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
extern "C" {
#include "libs/bitmap.h"
}
#define ERROR_EXIT -1
#define cudaErrorCheck(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line,
bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %s %d\n", cudaGetErrorName(code),
cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
// Convolutional Filter Examples, each with dimension 3,
// gaussian filter with dimension 5
// If you apply another filter, remember not only to exchange
// the filter but also the filterFactor and the correct dimension.
// int const sobelYFilter[] = {-1, -2, -1, 0, 0, 0, 1, 2, 1};
// float const sobelYFilterFactor = (float)1.0;
//
// int const sobelXFilter[] = {-1, 0, 1, -2, 0, 2, -1, 0, 1};
// float const sobelXFilterFactor = (float)1.0;
int const laplacian1Filter[] = {-1, -4, -1, -4, 20, -4, -1, -4, -1};
float const laplacian1FilterFactor = (float)1.0;
// int const laplacian2Filter[] = {0, 1, 0, 1, -4, 1, 0, 1, 0};
// float const laplacian2FilterFactor = (float)1.0;
//
// int const laplacian3Filter[] = {-1, -1, -1, -1, 8, -1, -1, -1, -1};
// float const laplacian3FilterFactor = (float)1.0;
//
//// Bonus Filter:
//
// int const gaussianFilter[] = {1, 4, 6, 4, 1, 4, 16, 24, 16, 4, 6, 24, 36,
// 24, 6, 4, 16, 24, 16, 4, 1, 4, 6, 4, 1};
//
// float const gaussianFilterFactor = (float)1.0 / 256.0;
// Apply convolutional filter on image data
void applyFilter(unsigned char **out, unsigned char **in, unsigned int width,
unsigned int height, int *filter, unsigned int filterDim,
float filterFactor) {
unsigned int const filterCenter = (filterDim / 2);
for (unsigned int y = 0; y < height; y++) {
for (unsigned int x = 0; x < width; x++) {
int aggregate = 0;
for (unsigned int ky = 0; ky < filterDim; ky++) {
int nky = filterDim - 1 - ky;
for (unsigned int kx = 0; kx < filterDim; kx++) {
int nkx = filterDim - 1 - kx;
int yy = y + (ky - filterCenter);
int xx = x + (kx - filterCenter);
if (xx >= 0 && xx < (int)width && yy >= 0 && yy < (int)height)
aggregate += in[yy][xx] * filter[nky * filterDim + nkx];
}
}
aggregate *= filterFactor;
if (aggregate > 0) {
out[y][x] = (aggregate > 255) ? 255 : aggregate;
} else {
out[y][x] = 0;
}
}
}
}
void help(char const *exec, char const opt, char const *optarg) {
FILE *out = stdout;
if (opt != 0) {
out = stderr;
if (optarg) {
fprintf(out, "Invalid parameter - %c %s\n", opt, optarg);
} else {
fprintf(out, "Invalid parameter - %c\n", opt);
}
}
fprintf(out, "%s [options] <input-bmp> <output-bmp>\n", exec);
fprintf(out, "\n");
fprintf(out, "Options:\n");
fprintf(out, " -i, --iterations <iterations> number of iterations (1)\n");
fprintf(out, "\n");
fprintf(out, "Example: %s in.bmp out.bmp -i 10000\n", exec);
}
int main(int argc, char **argv) {
/*
Parameter parsing, don't change this!
*/
unsigned int iterations = 1;
char *output = NULL;
char *input = NULL;
int ret = 0;
static struct option const long_options[] = {
{"help", no_argument, 0, 'h'},
{"iterations", required_argument, 0, 'i'},
{0, 0, 0, 0}};
static char const *short_options = "hi:";
{
char *endptr;
int c;
int option_index = 0;
while ((c = getopt_long(argc, argv, short_options, long_options,
&option_index)) != -1) {
switch (c) {
case 'h':
help(argv[0], 0, NULL);
return 0;
case 'i':
iterations = strtol(optarg, &endptr, 10);
if (endptr == optarg) {
help(argv[0], c, optarg);
return ERROR_EXIT;
}
break;
default:
abort();
}
}
}
if (argc <= (optind + 1)) {
    help(argv[0], ' ', "Not enough arguments");
return ERROR_EXIT;
}
input = (char *)calloc(strlen(argv[optind]) + 1, sizeof(char));
strncpy(input, argv[optind], strlen(argv[optind]));
optind++;
output = (char *)calloc(strlen(argv[optind]) + 1, sizeof(char));
strncpy(output, argv[optind], strlen(argv[optind]));
optind++;
/*
End of Parameter parsing!
*/
/*
Create the BMP image and load it from disk.
*/
bmpImage *image = newBmpImage(0, 0);
  if (image == NULL) {
    fprintf(stderr, "Could not allocate new image!\n");
    return ERROR_EXIT;
  }
if (loadBmpImage(image, input) != 0) {
fprintf(stderr, "Could not load bmp image '%s'!\n", input);
freeBmpImage(image);
return ERROR_EXIT;
}
// Create a single color channel image. It is easier to work just with one
// color
bmpImageChannel *imageChannel =
newBmpImageChannel(image->width, image->height);
if (imageChannel == NULL) {
fprintf(stderr, "Could not allocate new image channel!\n");
freeBmpImage(image);
return ERROR_EXIT;
}
// Extract from the loaded image an average over all colors - nothing else
  // than a black and white representation. extractImageChannel and
// mapImageChannel need the images to be in the exact same dimensions! Other
// prepared extraction functions are extractRed, extractGreen, extractBlue
if (extractImageChannel(imageChannel, image, extractAverage) != 0) {
fprintf(stderr, "Could not extract image channel!\n");
freeBmpImage(image);
freeBmpImageChannel(imageChannel);
return ERROR_EXIT;
}
// Here we do the actual computation!
// imageChannel->data is a 2-dimensional array of unsigned char which is
// accessed row first ([y][x])
bmpImageChannel *processImageChannel =
newBmpImageChannel(imageChannel->width, imageChannel->height);
for (unsigned int i = 0; i < iterations; i++) {
applyFilter(
processImageChannel->data, imageChannel->data, imageChannel->width,
imageChannel->height, (int *)laplacian1Filter, 3, laplacian1FilterFactor
// (int *)laplacian2Filter, 3, laplacian2FilterFactor
// (int *)laplacian3Filter, 3, laplacian3FilterFactor
//(int *)gaussianFilter, 5, gaussianFilterFactor
);
// Swap the data pointers
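    // (Both the row-pointer table `data` and the raw backing buffer `rawdata`
    // must change owners together, so the next iteration filters the freshly
    // written channel without copying any pixels.)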
unsigned char **tmp = processImageChannel->data;
processImageChannel->data = imageChannel->data;
imageChannel->data = tmp;
unsigned char *tmp_raw = processImageChannel->rawdata;
processImageChannel->rawdata = imageChannel->rawdata;
imageChannel->rawdata = tmp_raw;
}
freeBmpImageChannel(processImageChannel);
// Map our single color image back to a normal BMP image with 3 color channels
// mapEqual puts the color value on all three channels the same way
// other mapping functions are mapRed, mapGreen, mapBlue
if (mapImageChannel(image, imageChannel, mapEqual) != 0) {
fprintf(stderr, "Could not map image channel!\n");
freeBmpImage(image);
freeBmpImageChannel(imageChannel);
return ERROR_EXIT;
}
freeBmpImageChannel(imageChannel);
// Write the image back to disk
if (saveBmpImage(image, output) != 0) {
fprintf(stderr, "Could not save output to '%s'!\n", output);
freeBmpImage(image);
return ERROR_EXIT;
};
ret = 0;
if (input)
free(input);
if (output)
free(output);
return ret;
};
|
ce40050f11bb9b4b403c86931fee56f0dd86e1f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2012, MAURO BIANCO, UGO VARETTO, SWISS NATIONAL SUPERCOMPUTING CENTRE (CSCS)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Swiss National Supercomputing Centre (CSCS) nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL MAURO BIANCO, UGO VARETTO, OR
SWISS NATIONAL SUPERCOMPUTING CENTRE (CSCS), BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <GCL.h>
#include <iostream>
#include <sstream>
#include <fstream>
std::ostream *filep;
#include <halo_exchange.h>
#include <string>
#include <stdlib.h>
#include <utils/layout_map.h>
#include <utils/boollist.h>
#include <sys/time.h>
#include "triplet.h"
int pid;
int nprocs;
MPI_Comm CartComm;
int dims[3] = {0,0,0};
int coords[3]={0,0,0};
int datalen, tloop;
int tot_it;
double *thedata;
double* compdata_g;
__global__ void kernel_ex(double *a, int N, int _tloop)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N)
for (int i=0; i < _tloop; ++i)
a[idx] += ::pow(a[idx],01.0056);
}
void prepare_to_do_something() {
hipMalloc((void**)&compdata_g, sizeof(double)*datalen);
hipMemcpy(thedata, compdata_g, sizeof(double)*datalen, hipMemcpyDeviceToHost);
}
hipStream_t c_stream ;
void do_something() {
dim3 grid_size, block_size;
block_size.x = 32;
block_size.y = 4;
grid_size.x = datalen / (block_size.x*block_size.y) +1;
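  // Note: kernel_ex indexes only along x (blockIdx.x * blockDim.x +
  // threadIdx.x with blockDim.x == 32), so this launch touches roughly
  // datalen/4 elements and the four threadIdx.y lanes of a block update the
  // same element; that is presumably fine here, since the kernel only
  // generates dummy GPU work to overlap with the halo exchange.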
hipLaunchKernelGGL(( kernel_ex) , dim3(grid_size), dim3(block_size), 0, c_stream , compdata_g, datalen, tloop);
// hipDeviceSynchronize();
// hipStreamDestroy ( c_stream );
}
struct timeval start_tv;
struct timeval stop1_tv;
struct timeval stop2_tv;
struct timeval stop3_tv;
double lapse_time1;
double lapse_time2;
double lapse_time3;
double lapse_time4;
#ifndef PACKING_TYPE
#define PACKING_TYPE GCL::version_manual
#endif
#define B_ADD 1
#define C_ADD 2
typedef GCL::gcl_gpu arch_type;
template <typename T, typename lmap>
struct array {
T *ptr;
int n,m,l;
array(T* _p, int _n, int _m, int _l)
: ptr(_p)
, n(lmap::template find<2>(_n,_m,_l))
, m(lmap::template find<1>(_n,_m,_l))
, l(lmap::template find<0>(_n,_m,_l))
{}
T &operator()(int i, int j, int k) {
// a[(DIM1+2*H)*(DIM2+2*H)*kk+ii*(DIM2+2*H)+jj]
return ptr[l*m*lmap::template find<2>(i,j,k)+
l*lmap::template find<1>(i,j,k)+
lmap::template find<0>(i,j,k)];
}
T const &operator()(int i, int j, int k) const {
return ptr[l*m*lmap::template find<2>(i,j,k)+
l*lmap::template find<1>(i,j,k)+
lmap::template find<0>(i,j,k)];
}
operator void*() const {return reinterpret_cast<void*>(ptr);}
operator T*() const {return ptr;}
};
/** \file Example of use of halo_exchange pattern for regular
grids. The comments in the code aim at highlight the process of
instantiating and running a halo exchange pattern.
*/
inline int modulus(int __i, int __j) {
return (((((__i%__j)<0)?(__j+__i%__j):(__i%__j))));
}
/* Just a utility to print values
*/
template <typename array_t>
void printbuff(std::ostream &file, array_t const & a, int d1, int d2, int d3) {
if (d1<=7 && d2<=7 && d3<=7) {
file << "------------\n";
for (int kk=0; kk<d3; ++kk) {
file << "|";
for (int jj=0; jj<d2; ++jj) {
for (int ii=0; ii<d1; ++ii) {
file << a(ii,jj,kk);
}
file << "|\n";
}
file << "\n\n";
}
file << "------------\n\n";
}
}
template <typename ST, int I1, int I2, int I3, bool per0, bool per1, bool per2>
void run(ST & file, int DIM1, int DIM2, int DIM3, int H1, int H2, int H3, triple_t<USE_DOUBLE> *_a, triple_t<USE_DOUBLE> *_b, triple_t<USE_DOUBLE> *_c) {
typedef GCL::layout_map<I1,I2,I3> layoutmap;
array<triple_t<USE_DOUBLE>, layoutmap > a(_a, (DIM1+2*H1),(DIM2+2*H2),(DIM3+2*H3));
array<triple_t<USE_DOUBLE>, layoutmap > b(_b, (DIM1+2*H1),(DIM2+2*H2),(DIM3+2*H3));
array<triple_t<USE_DOUBLE>, layoutmap > c(_c, (DIM1+2*H1),(DIM2+2*H2),(DIM3+2*H3));
/* Just an initialization */
for (int ii=0; ii<DIM1+2*H1; ++ii)
for (int jj=0; jj<DIM2+2*H2; ++jj) {
for (int kk=0; kk<DIM3+2*H3; ++kk) {
a(ii,jj,kk) = triple_t<USE_DOUBLE>();
b(ii,jj,kk) = triple_t<USE_DOUBLE>();
c(ii,jj,kk) = triple_t<USE_DOUBLE>();
}
}
// a(0,0,0) = triple_t<USE_DOUBLE>(3000+GCL::PID, 4000+GCL::PID, 5000+GCL::PID);
// b(0,0,0) = triple_t<USE_DOUBLE>(3010+GCL::PID, 4010+GCL::PID, 5010+GCL::PID);
// c(0,0,0) = triple_t<USE_DOUBLE>(3020+GCL::PID, 4020+GCL::PID, 5020+GCL::PID);
/* The pattern type is defined with the layouts, data types and
number of dimensions.
The logical assumption done in the program is that 'i' is the
first dimension (rows), 'j' is the second, and 'k' is the
third. The first layout states that 'i' is the second dimension
in order of strides, while 'j' is the first and 'k' is the third
     (just by looking at the initialization loops this should be
     clear).
     The second layout states that the first dimension in data ('i')
     also identifies the first dimension in the communicator. Logically,
     moving on 'i' dimension from processor (p,q,r) will lead you
     logically to processor (p+1,q,r). The other dimensions behave in
     the same way.
*/
typedef GCL::halo_exchange_generic<GCL::layout_map<0,1,2>, 3, arch_type, PACKING_TYPE > pattern_type;
/* The pattern is now instantiated with the periodicities and the
communicator. The periodicity of the communicator is
irrelevant. Setting it to be periodic is the best choice, then
GCL can deal with any periodicity easily.
*/
pattern_type he(typename pattern_type::grid_type::period_type(per0, per1, per2), CartComm);
GCL::array<GCL::halo_descriptor,3> halo_dsc;
halo_dsc[0] = GCL::halo_descriptor(H1, H1, H1, DIM1+H1-1, DIM1+2*H1);
halo_dsc[1] = GCL::halo_descriptor(H2, H2, H2, DIM2+H2-1, DIM2+2*H2);
halo_dsc[2] = GCL::halo_descriptor(H3, H3, H3, DIM3+H3-1, DIM3+2*H3);
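  /* Illustration (assuming the usual GCL reading of the five arguments as
     minus-halo, plus-halo, begin, end, total-length): with DIM1 = 4 and
     H1 = 1 the first descriptor is (1, 1, 1, 4, 6), i.e. one halo point on
     each side of the 4-point compute region inside a row of 6 elements. */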
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field1(reinterpret_cast<triple_t<USE_DOUBLE>::data_type*>(a.ptr), halo_dsc);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field2(reinterpret_cast<triple_t<USE_DOUBLE>::data_type*>(b.ptr), halo_dsc);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field3(reinterpret_cast<triple_t<USE_DOUBLE>::data_type*>(c.ptr), halo_dsc);
/* Pattern is set up. This must be done only once per pattern. The
     parameter must be greater than or equal to the largest number of
arrays updated in a single step.
*/
//he.setup(100, halo_dsc, sizeof(double));
he.setup(3, GCL::field_on_the_fly<int,layoutmap, pattern_type::traits>(NULL,halo_dsc), sizeof(triple_t<USE_DOUBLE>)); // Estimates the size
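  /* The first argument (3) matches the three fields (a, b, c) exchanged
     together below; the field_on_the_fly passed here carries only the halo
     geometry (its data pointer is NULL) and, together with
     sizeof(triple_t<USE_DOUBLE>), is presumably what the buffer-size
     estimate is based on. */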
file << "Proc: (" << coords[0] << ", " << coords[1] << ", " << coords[2] << ")\n";
/* Data is initialized in the inner region of size DIM1xDIM2
*/
for (int ii=H1; ii<DIM1+H1; ++ii)
for (int jj=H2; jj<DIM2+H2; ++jj)
for (int kk=H3; kk<DIM3+H3; ++kk) {
a(ii,jj,kk) = //(100*(pid))+
triple_t<USE_DOUBLE>(ii-H1+(DIM1)*coords[0],
jj-H2+(DIM2)*coords[1],
kk-H3+(DIM3)*coords[2]);
b(ii,jj,kk) = //(200*(pid))+
triple_t<USE_DOUBLE>(ii-H1+(DIM1)*coords[0]+B_ADD,
jj-H2+(DIM2)*coords[1]+B_ADD,
kk-H3+(DIM3)*coords[2]+B_ADD);
c(ii,jj,kk) = //300*(pid))+
triple_t<USE_DOUBLE>(ii-H1+(DIM1)*coords[0]+C_ADD,
jj-H2+(DIM2)*coords[1]+C_ADD,
kk-H3+(DIM3)*coords[2]+C_ADD);
}
file << "A \n";
printbuff(file,a, DIM1+H1+H1, DIM2+H2+H2, DIM3+H3+H3);
file << "B \n";
printbuff(file,b, DIM1+H1+H1, DIM2+H2+H2, DIM3+H3+H3);
file << "C \n";
printbuff(file,c, DIM1+H1+H1, DIM2+H2+H2, DIM3+H3+H3);
file.flush();
file << "GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU \n";
triple_t<USE_DOUBLE>* gpu_a = 0;
triple_t<USE_DOUBLE>* gpu_b = 0;
triple_t<USE_DOUBLE>* gpu_c = 0;
hipError_t status;
status = hipMalloc( &gpu_a, (DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)*sizeof(triple_t<USE_DOUBLE>));
if( !checkCudaStatus( status ) ) return;
status = hipMalloc( &gpu_b, (DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)*sizeof(triple_t<USE_DOUBLE>));
if( !checkCudaStatus( status ) ) return;
status = hipMalloc( &gpu_c, (DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)*sizeof(triple_t<USE_DOUBLE>));
if( !checkCudaStatus( status ) ) return;
status = hipMemcpy( gpu_a, a.ptr,
(DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)*sizeof(triple_t<USE_DOUBLE>),
hipMemcpyHostToDevice );
if( !checkCudaStatus( status ) ) return;
status = hipMemcpy( gpu_b, b.ptr,
(DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)*sizeof(triple_t<USE_DOUBLE>),
hipMemcpyHostToDevice );
if( !checkCudaStatus( status ) ) return;
status = hipMemcpy( gpu_c, c.ptr,
(DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)*sizeof(triple_t<USE_DOUBLE>),
hipMemcpyHostToDevice );
if( !checkCudaStatus( status ) ) return;
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field1_gpu(reinterpret_cast<triple_t<USE_DOUBLE>::data_type*>(gpu_a), halo_dsc);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field2_gpu(reinterpret_cast<triple_t<USE_DOUBLE>::data_type*>(gpu_b), halo_dsc);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field3_gpu(reinterpret_cast<triple_t<USE_DOUBLE>::data_type*>(gpu_c), halo_dsc);
std::vector<GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> > vect(3);
gettimeofday(&start_tv, NULL);
MPI_Barrier(MPI_COMM_WORLD);
for (int n_it = 0; n_it < tot_it; ++n_it) {
he.post_receives();
he.pack(field1_gpu, field2_gpu, field3_gpu);
//MPI_Barrier(MPI_COMM_WORLD);
he.do_sends();
do_something();
he.wait();
he.unpack(field1_gpu, field2_gpu, field3_gpu);
// MPI_Barrier(MPI_COMM_WORLD);
}
hipDeviceSynchronize();
gettimeofday(&stop3_tv, NULL);
lapse_time4 = ((static_cast<double>(stop3_tv.tv_sec)+1/1000000.0*static_cast<double>(stop3_tv.tv_usec)) - (static_cast<double>(start_tv.tv_sec)+1/1000000.0*static_cast<double>(start_tv.tv_usec))) * 1000.0;
MPI_Barrier(MPI_COMM_WORLD);
file << "TIME PACK: " << lapse_time1 << std::endl;
file << "TIME EXCH: " << lapse_time2 << std::endl;
file << "TIME UNPK: " << lapse_time3 << std::endl;
file << "TIME ALL : " << lapse_time1+lapse_time2+lapse_time3 << std::endl;
file << "TIME TOT : " << lapse_time4 << std::endl;
status = hipMemcpy( a.ptr, gpu_a,
(DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)*sizeof(triple_t<USE_DOUBLE>),
hipMemcpyDeviceToHost );
if( !checkCudaStatus( status ) ) return;
status = hipMemcpy( b.ptr, gpu_b,
(DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)*sizeof(triple_t<USE_DOUBLE>),
hipMemcpyDeviceToHost );
if( !checkCudaStatus( status ) ) return;
status = hipMemcpy( c.ptr, gpu_c,
(DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)*sizeof(triple_t<USE_DOUBLE>),
hipMemcpyDeviceToHost );
if( !checkCudaStatus( status ) ) return;
status = hipFree( gpu_a );
if( !checkCudaStatus( status ) ) return;
status = hipFree( gpu_b );
if( !checkCudaStatus( status ) ) return;
status = hipFree( gpu_c );
if( !checkCudaStatus( status ) ) return;
file << "\n********************************************************************************\n";
file << "A \n";
printbuff(file,a, DIM1+H1+H1, DIM2+H2+H2, DIM3+H3+H3);
file << "B \n";
printbuff(file,b, DIM1+H1+H1, DIM2+H2+H2, DIM3+H3+H3);
file << "C \n";
printbuff(file,c, DIM1+H1+H1, DIM2+H2+H2, DIM3+H3+H3);
file.flush();
int passed = true;
/* Checking the data arrived correctly in the whole region
*/
for (int ii=0; ii<DIM1+2*H1; ++ii)
for (int jj=0; jj<DIM2+2*H2; ++jj)
for (int kk=0; kk<DIM3+2*H3; ++kk) {
triple_t<USE_DOUBLE> ta;
triple_t<USE_DOUBLE> tb;
triple_t<USE_DOUBLE> tc;
int tax, tay, taz;
int tbx, tby, tbz;
int tcx, tcy, tcz;
tax = modulus(ii-H1+(DIM1)*coords[0], DIM1*dims[0]);
tbx = modulus(ii-H1+(DIM1)*coords[0], DIM1*dims[0])+B_ADD;
tcx = modulus(ii-H1+(DIM1)*coords[0], DIM1*dims[0])+C_ADD;
tay = modulus(jj-H2+(DIM2)*coords[1], DIM2*dims[1]);
tby = modulus(jj-H2+(DIM2)*coords[1], DIM2*dims[1])+B_ADD;
tcy = modulus(jj-H2+(DIM2)*coords[1], DIM2*dims[1])+C_ADD;
taz = modulus(kk-H3+(DIM3)*coords[2], DIM3*dims[2]);
tbz = modulus(kk-H3+(DIM3)*coords[2], DIM3*dims[2])+B_ADD;
tcz = modulus(kk-H3+(DIM3)*coords[2], DIM3*dims[2])+C_ADD;
if (!per0) {
if ( ((coords[0]==0) && (ii<H1)) ||
((coords[0] == dims[0]-1) && (ii >= DIM1+H1)) ) {
tax=triple_t<USE_DOUBLE>().x();
tbx=triple_t<USE_DOUBLE>().x();
tcx=triple_t<USE_DOUBLE>().x();
}
}
if (!per1) {
if ( ((coords[1]==0) && (jj<H2)) ||
((coords[1] == dims[1]-1) && (jj >= DIM2+H2)) ) {
tay=triple_t<USE_DOUBLE>().y();
tby=triple_t<USE_DOUBLE>().y();
tcy=triple_t<USE_DOUBLE>().y();
}
}
if (!per2) {
if ( ((coords[2]==0) && (kk<H3)) ||
((coords[2] == dims[2]-1) && (kk >= DIM3+H3)) ) {
taz=triple_t<USE_DOUBLE>().z();
tbz=triple_t<USE_DOUBLE>().z();
tcz=triple_t<USE_DOUBLE>().z();
}
}
ta = triple_t<USE_DOUBLE>(tax, tay, taz).floor();
tb = triple_t<USE_DOUBLE>(tbx, tby, tbz).floor();
tc = triple_t<USE_DOUBLE>(tcx, tcy, tcz).floor();
if (a(ii,jj,kk) != ta) {
passed = false;
file << ii << ", " << jj << ", " << kk << " values found != expct: "
<< "a " << a(ii,jj,kk) << " != "
<< ta
<< "\n";
}
if (b(ii,jj,kk) != tb) {
passed = false;
file << ii << ", " << jj << ", " << kk << " values found != expct: "
<< "b " << b(ii,jj,kk) << " != "
<< tb
<< "\n";
}
if (c(ii,jj,kk) != tc) {
passed = false;
file << ii << ", " << jj << ", " << kk << " values found != expct: "
<< "c " << c(ii,jj,kk) << " != "
<< tc
<< "\n";
}
}
if (passed)
file << "RESULT: PASSED!\n";
else
file << "RESULT: FAILED!\n";
}
#ifdef _GCL_GPU_
/* device_binding added by Devendar Bureddy, OSU */
void
device_binding ()
{
int local_rank=0/*, num_local_procs*/;
int dev_count, use_dev_count, my_dev_id;
char *str;
if ((str = getenv ("MV2_COMM_WORLD_LOCAL_RANK")) != NULL)
{
local_rank = atoi (str);
printf ("MV2_COMM_WORLD_LOCAL_RANK %s\n", str);
}
if ((str = getenv ("MPISPAWN_LOCAL_NPROCS")) != NULL)
{
//num_local_procs = atoi (str);
printf ("MPISPAWN_LOCAL_NPROCS %s\n", str);
}
hipGetDeviceCount (&dev_count);
if ((str = getenv ("NUM_GPU_DEVICES")) != NULL)
{
use_dev_count = atoi (str);
printf ("NUM_GPU_DEVICES %s\n", str);
}
else
{
use_dev_count = dev_count;
}
my_dev_id = local_rank % use_dev_count;
printf ("local rank = %d dev id = %d\n", local_rank, my_dev_id);
hipSetDevice (my_dev_id);
}
#endif
int main(int argc, char** argv) {
#ifdef _GCL_GPU_
device_binding();
#endif
/* this example is based on MPI Cart Communicators, so we need to
initialize MPI. This can be done by GCL automatically
*/
GCL::GCL_Init(argc, argv);
/* Now let us initialize GCL itself. If MPI is not initialized at
this point, it will initialize it
*/
GCL::GCL_Init(argc, argv);
  /* Here we compute the computing grid as in many applications
*/
MPI_Comm_rank(MPI_COMM_WORLD, &pid);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
if (argc != 9) {
if (pid==0) {
std::cout << "Usage: " << argv[0] << " dim1 dim2 dim3 h datalen tloop tot_it suffix\n"
<< "where:\n"
<< "dim1 dim2 dim3 are the sizes of the 3D tiles in each process.\n"
<< "h is the halo width around the above dimensions.\n"
                << "datalen is the amount of data to allocate (on GPU) for performing the work to be overlapped.\n"
<< " There will be a kernel launch with this many threads.\n"
<< " tloop is the number of iterations each GPU thread will execute\n"
<< "tot_it is the number of halo exchanges to be executed before measuring time\n"
<< "suffix is a string to be appended to output files to be able to run multiple jobs at the same time\n"
<< std::endl;
    }
    return 1;
  }
std::cout << pid << " " << nprocs << "\n";
std::stringstream ss;
ss << pid;
std::string suffix(argv[8]);
std::string filename = "out" + ss.str() + suffix + ".txt" ;
std::cout << filename << std::endl;
std::ofstream file(filename.c_str());
filep = &file;
file << pid << " " << nprocs << "\n";
MPI_Dims_create(nprocs, 3, dims);
int period[3] = {1, 1, 1};
file << "@" << pid << "@ MPI GRID SIZE " << dims[0] << " - " << dims[1] << " - " << dims[2] << "\n";
MPI_Cart_create(MPI_COMM_WORLD, 3, dims, period, false, &CartComm);
MPI_Cart_get(CartComm, 3, dims, period, coords);
/* Each process will hold a tile of size
(DIM1+2*H)x(DIM2+2*H)x(DIM3+2*H). The DIM1xDIM2xDIM3 area inside
     the H width border is the inner region of a hypothetical stencil
     computation whose halo width is H.
*/
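  /* Rough footprint sketch (assuming triple_t<USE_DOUBLE> packs three
     doubles, i.e. 24 bytes per element): with DIM1=DIM2=DIM3=128 and H=2,
     each array holds 132^3 ~= 2.3e6 elements, about 55 MB per array and
     about 166 MB for the three host arrays, with the same amount allocated
     again on the GPU inside run(). */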
int DIM1=atoi(argv[1]);
int DIM2=atoi(argv[2]);
int DIM3=atoi(argv[3]);
int H1 =atoi(argv[4]);
int H2=H1;
int H3=H1;
datalen =atoi(argv[5]);
if (datalen)
thedata = new double[datalen];
tloop =atoi(argv[6]);
tot_it =atoi(argv[7]);
hipStreamCreate ( &c_stream );
/* This example will exchange 3 data arrays at the same time with
different values.
*/
triple_t<USE_DOUBLE> *_a = new triple_t<USE_DOUBLE>[(DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)];
triple_t<USE_DOUBLE> *_b = new triple_t<USE_DOUBLE>[(DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)];
triple_t<USE_DOUBLE> *_c = new triple_t<USE_DOUBLE>[(DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)];
prepare_to_do_something();
file << "Permutation 0,1,2\n";
#ifndef BENCH
#define BENCH 5
#endif
for (int i=0; i<BENCH; ++i) {
file << "run<std::ostream, 0,1,2, true, true, true>(file, DIM1, DIM2, DIM3, H1, H2, H3, _a, _b, _c)\n";
run<std::ostream, 0,1,2, true, true, true>(file, DIM1, DIM2, DIM3, H1, H2, H3, _a, _b, _c);
file.flush();
}
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}
| ce40050f11bb9b4b403c86931fee56f0dd86e1f3.cu |
/*
Copyright (c) 2012, MAURO BIANCO, UGO VARETTO, SWISS NATIONAL SUPERCOMPUTING CENTRE (CSCS)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Swiss National Supercomputing Centre (CSCS) nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL MAURO BIANCO, UGO VARETTO, OR
SWISS NATIONAL SUPERCOMPUTING CENTRE (CSCS), BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <GCL.h>
#include <iostream>
#include <sstream>
#include <fstream>
std::ostream *filep;
#include <halo_exchange.h>
#include <string>
#include <stdlib.h>
#include <utils/layout_map.h>
#include <utils/boollist.h>
#include <sys/time.h>
#include "triplet.h"
int pid;
int nprocs;
MPI_Comm CartComm;
int dims[3] = {0,0,0};
int coords[3]={0,0,0};
int datalen, tloop;
int tot_it;
double *thedata;
double* compdata_g;
__global__ void kernel_ex(double *a, int N, int _tloop)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N)
for (int i=0; i < _tloop; ++i)
a[idx] += std::pow(a[idx],01.0056);
}
void prepare_to_do_something() {
cudaMalloc((void**)&compdata_g, sizeof(double)*datalen);
cudaMemcpy(thedata, compdata_g, sizeof(double)*datalen, cudaMemcpyDeviceToHost);
}
cudaStream_t c_stream ;
void do_something() {
dim3 grid_size, block_size;
block_size.x = 32;
block_size.y = 4;
grid_size.x = datalen / (block_size.x*block_size.y) +1;
kernel_ex <<< grid_size, block_size, 0, c_stream >>> (compdata_g, datalen, tloop);
// cudaDeviceSynchronize();
// cudaStreamDestroy ( c_stream );
}
struct timeval start_tv;
struct timeval stop1_tv;
struct timeval stop2_tv;
struct timeval stop3_tv;
double lapse_time1;
double lapse_time2;
double lapse_time3;
double lapse_time4;
#ifndef PACKING_TYPE
#define PACKING_TYPE GCL::version_manual
#endif
#define B_ADD 1
#define C_ADD 2
typedef GCL::gcl_gpu arch_type;
template <typename T, typename lmap>
struct array {
T *ptr;
int n,m,l;
array(T* _p, int _n, int _m, int _l)
: ptr(_p)
, n(lmap::template find<2>(_n,_m,_l))
, m(lmap::template find<1>(_n,_m,_l))
, l(lmap::template find<0>(_n,_m,_l))
{}
T &operator()(int i, int j, int k) {
// a[(DIM1+2*H)*(DIM2+2*H)*kk+ii*(DIM2+2*H)+jj]
return ptr[l*m*lmap::template find<2>(i,j,k)+
l*lmap::template find<1>(i,j,k)+
lmap::template find<0>(i,j,k)];
}
T const &operator()(int i, int j, int k) const {
return ptr[l*m*lmap::template find<2>(i,j,k)+
l*lmap::template find<1>(i,j,k)+
lmap::template find<0>(i,j,k)];
}
operator void*() const {return reinterpret_cast<void*>(ptr);}
operator T*() const {return ptr;}
};
/** \file Example of use of halo_exchange pattern for regular
grids. The comments in the code aim at highlight the process of
instantiating and running a halo exchange pattern.
*/
inline int modulus(int __i, int __j) {
return (((((__i%__j)<0)?(__j+__i%__j):(__i%__j))));
}
/* Just a utility to print values
*/
template <typename array_t>
void printbuff(std::ostream &file, array_t const & a, int d1, int d2, int d3) {
if (d1<=7 && d2<=7 && d3<=7) {
file << "------------\n";
for (int kk=0; kk<d3; ++kk) {
file << "|";
for (int jj=0; jj<d2; ++jj) {
for (int ii=0; ii<d1; ++ii) {
file << a(ii,jj,kk);
}
file << "|\n";
}
file << "\n\n";
}
file << "------------\n\n";
}
}
template <typename ST, int I1, int I2, int I3, bool per0, bool per1, bool per2>
void run(ST & file, int DIM1, int DIM2, int DIM3, int H1, int H2, int H3, triple_t<USE_DOUBLE> *_a, triple_t<USE_DOUBLE> *_b, triple_t<USE_DOUBLE> *_c) {
typedef GCL::layout_map<I1,I2,I3> layoutmap;
array<triple_t<USE_DOUBLE>, layoutmap > a(_a, (DIM1+2*H1),(DIM2+2*H2),(DIM3+2*H3));
array<triple_t<USE_DOUBLE>, layoutmap > b(_b, (DIM1+2*H1),(DIM2+2*H2),(DIM3+2*H3));
array<triple_t<USE_DOUBLE>, layoutmap > c(_c, (DIM1+2*H1),(DIM2+2*H2),(DIM3+2*H3));
/* Just an initialization */
for (int ii=0; ii<DIM1+2*H1; ++ii)
for (int jj=0; jj<DIM2+2*H2; ++jj) {
for (int kk=0; kk<DIM3+2*H3; ++kk) {
a(ii,jj,kk) = triple_t<USE_DOUBLE>();
b(ii,jj,kk) = triple_t<USE_DOUBLE>();
c(ii,jj,kk) = triple_t<USE_DOUBLE>();
}
}
// a(0,0,0) = triple_t<USE_DOUBLE>(3000+GCL::PID, 4000+GCL::PID, 5000+GCL::PID);
// b(0,0,0) = triple_t<USE_DOUBLE>(3010+GCL::PID, 4010+GCL::PID, 5010+GCL::PID);
// c(0,0,0) = triple_t<USE_DOUBLE>(3020+GCL::PID, 4020+GCL::PID, 5020+GCL::PID);
/* The pattern type is defined with the layouts, data types and
number of dimensions.
The logical assumption done in the program is that 'i' is the
first dimension (rows), 'j' is the second, and 'k' is the
third. The first layout states that 'i' is the second dimension
in order of strides, while 'j' is the first and 'k' is the third
     (just by looking at the initialization loops this should be
     clear).
     The second layout states that the first dimension in data ('i')
     also identifies the first dimension in the communicator. Logically,
     moving on 'i' dimension from processor (p,q,r) will lead you
     logically to processor (p+1,q,r). The other dimensions behave in
     the same way.
*/
typedef GCL::halo_exchange_generic<GCL::layout_map<0,1,2>, 3, arch_type, PACKING_TYPE > pattern_type;
/* The pattern is now instantiated with the periodicities and the
communicator. The periodicity of the communicator is
irrelevant. Setting it to be periodic is the best choice, then
GCL can deal with any periodicity easily.
*/
pattern_type he(typename pattern_type::grid_type::period_type(per0, per1, per2), CartComm);
GCL::array<GCL::halo_descriptor,3> halo_dsc;
halo_dsc[0] = GCL::halo_descriptor(H1, H1, H1, DIM1+H1-1, DIM1+2*H1);
halo_dsc[1] = GCL::halo_descriptor(H2, H2, H2, DIM2+H2-1, DIM2+2*H2);
halo_dsc[2] = GCL::halo_descriptor(H3, H3, H3, DIM3+H3-1, DIM3+2*H3);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field1(reinterpret_cast<triple_t<USE_DOUBLE>::data_type*>(a.ptr), halo_dsc);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field2(reinterpret_cast<triple_t<USE_DOUBLE>::data_type*>(b.ptr), halo_dsc);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field3(reinterpret_cast<triple_t<USE_DOUBLE>::data_type*>(c.ptr), halo_dsc);
/* Pattern is set up. This must be done only once per pattern. The
     parameter must be greater than or equal to the largest number of
arrays updated in a single step.
*/
//he.setup(100, halo_dsc, sizeof(double));
he.setup(3, GCL::field_on_the_fly<int,layoutmap, pattern_type::traits>(NULL,halo_dsc), sizeof(triple_t<USE_DOUBLE>)); // Estimates the size
file << "Proc: (" << coords[0] << ", " << coords[1] << ", " << coords[2] << ")\n";
/* Data is initialized in the inner region of size DIM1xDIM2
*/
for (int ii=H1; ii<DIM1+H1; ++ii)
for (int jj=H2; jj<DIM2+H2; ++jj)
for (int kk=H3; kk<DIM3+H3; ++kk) {
a(ii,jj,kk) = //(100*(pid))+
triple_t<USE_DOUBLE>(ii-H1+(DIM1)*coords[0],
jj-H2+(DIM2)*coords[1],
kk-H3+(DIM3)*coords[2]);
b(ii,jj,kk) = //(200*(pid))+
triple_t<USE_DOUBLE>(ii-H1+(DIM1)*coords[0]+B_ADD,
jj-H2+(DIM2)*coords[1]+B_ADD,
kk-H3+(DIM3)*coords[2]+B_ADD);
c(ii,jj,kk) = //300*(pid))+
triple_t<USE_DOUBLE>(ii-H1+(DIM1)*coords[0]+C_ADD,
jj-H2+(DIM2)*coords[1]+C_ADD,
kk-H3+(DIM3)*coords[2]+C_ADD);
}
file << "A \n";
printbuff(file,a, DIM1+H1+H1, DIM2+H2+H2, DIM3+H3+H3);
file << "B \n";
printbuff(file,b, DIM1+H1+H1, DIM2+H2+H2, DIM3+H3+H3);
file << "C \n";
printbuff(file,c, DIM1+H1+H1, DIM2+H2+H2, DIM3+H3+H3);
file.flush();
file << "GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU GPU \n";
triple_t<USE_DOUBLE>* gpu_a = 0;
triple_t<USE_DOUBLE>* gpu_b = 0;
triple_t<USE_DOUBLE>* gpu_c = 0;
cudaError_t status;
status = cudaMalloc( &gpu_a, (DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)*sizeof(triple_t<USE_DOUBLE>));
if( !checkCudaStatus( status ) ) return;
status = cudaMalloc( &gpu_b, (DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)*sizeof(triple_t<USE_DOUBLE>));
if( !checkCudaStatus( status ) ) return;
status = cudaMalloc( &gpu_c, (DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)*sizeof(triple_t<USE_DOUBLE>));
if( !checkCudaStatus( status ) ) return;
status = cudaMemcpy( gpu_a, a.ptr,
(DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)*sizeof(triple_t<USE_DOUBLE>),
cudaMemcpyHostToDevice );
if( !checkCudaStatus( status ) ) return;
status = cudaMemcpy( gpu_b, b.ptr,
(DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)*sizeof(triple_t<USE_DOUBLE>),
cudaMemcpyHostToDevice );
if( !checkCudaStatus( status ) ) return;
status = cudaMemcpy( gpu_c, c.ptr,
(DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)*sizeof(triple_t<USE_DOUBLE>),
cudaMemcpyHostToDevice );
if( !checkCudaStatus( status ) ) return;
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field1_gpu(reinterpret_cast<triple_t<USE_DOUBLE>::data_type*>(gpu_a), halo_dsc);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field2_gpu(reinterpret_cast<triple_t<USE_DOUBLE>::data_type*>(gpu_b), halo_dsc);
GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> field3_gpu(reinterpret_cast<triple_t<USE_DOUBLE>::data_type*>(gpu_c), halo_dsc);
std::vector<GCL::field_on_the_fly<triple_t<USE_DOUBLE>::data_type, layoutmap, pattern_type::traits> > vect(3);
gettimeofday(&start_tv, NULL);
MPI_Barrier(MPI_COMM_WORLD);
for (int n_it = 0; n_it < tot_it; ++n_it) {
he.post_receives();
he.pack(field1_gpu, field2_gpu, field3_gpu);
//MPI_Barrier(MPI_COMM_WORLD);
he.do_sends();
do_something();
he.wait();
he.unpack(field1_gpu, field2_gpu, field3_gpu);
// MPI_Barrier(MPI_COMM_WORLD);
}
cudaDeviceSynchronize();
gettimeofday(&stop3_tv, NULL);
lapse_time4 = ((static_cast<double>(stop3_tv.tv_sec)+1/1000000.0*static_cast<double>(stop3_tv.tv_usec)) - (static_cast<double>(start_tv.tv_sec)+1/1000000.0*static_cast<double>(start_tv.tv_usec))) * 1000.0;
MPI_Barrier(MPI_COMM_WORLD);
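  // Note: lapse_time1..3 are never assigned in this merged loop (the
  // per-phase timers were presumably dropped when the phases were fused), so
  // only TIME TOT below is meaningful; the other lines print the
  // zero-initialized globals.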
file << "TIME PACK: " << lapse_time1 << std::endl;
file << "TIME EXCH: " << lapse_time2 << std::endl;
file << "TIME UNPK: " << lapse_time3 << std::endl;
file << "TIME ALL : " << lapse_time1+lapse_time2+lapse_time3 << std::endl;
file << "TIME TOT : " << lapse_time4 << std::endl;
status = cudaMemcpy( a.ptr, gpu_a,
(DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)*sizeof(triple_t<USE_DOUBLE>),
cudaMemcpyDeviceToHost );
if( !checkCudaStatus( status ) ) return;
status = cudaMemcpy( b.ptr, gpu_b,
(DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)*sizeof(triple_t<USE_DOUBLE>),
cudaMemcpyDeviceToHost );
if( !checkCudaStatus( status ) ) return;
status = cudaMemcpy( c.ptr, gpu_c,
(DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)*sizeof(triple_t<USE_DOUBLE>),
cudaMemcpyDeviceToHost );
if( !checkCudaStatus( status ) ) return;
status = cudaFree( gpu_a );
if( !checkCudaStatus( status ) ) return;
status = cudaFree( gpu_b );
if( !checkCudaStatus( status ) ) return;
status = cudaFree( gpu_c );
if( !checkCudaStatus( status ) ) return;
file << "\n********************************************************************************\n";
file << "A \n";
printbuff(file,a, DIM1+H1+H1, DIM2+H2+H2, DIM3+H3+H3);
file << "B \n";
printbuff(file,b, DIM1+H1+H1, DIM2+H2+H2, DIM3+H3+H3);
file << "C \n";
printbuff(file,c, DIM1+H1+H1, DIM2+H2+H2, DIM3+H3+H3);
file.flush();
int passed = true;
/* Checking the data arrived correctly in the whole region
*/
for (int ii=0; ii<DIM1+2*H1; ++ii)
for (int jj=0; jj<DIM2+2*H2; ++jj)
for (int kk=0; kk<DIM3+2*H3; ++kk) {
triple_t<USE_DOUBLE> ta;
triple_t<USE_DOUBLE> tb;
triple_t<USE_DOUBLE> tc;
int tax, tay, taz;
int tbx, tby, tbz;
int tcx, tcy, tcz;
tax = modulus(ii-H1+(DIM1)*coords[0], DIM1*dims[0]);
tbx = modulus(ii-H1+(DIM1)*coords[0], DIM1*dims[0])+B_ADD;
tcx = modulus(ii-H1+(DIM1)*coords[0], DIM1*dims[0])+C_ADD;
tay = modulus(jj-H2+(DIM2)*coords[1], DIM2*dims[1]);
tby = modulus(jj-H2+(DIM2)*coords[1], DIM2*dims[1])+B_ADD;
tcy = modulus(jj-H2+(DIM2)*coords[1], DIM2*dims[1])+C_ADD;
taz = modulus(kk-H3+(DIM3)*coords[2], DIM3*dims[2]);
tbz = modulus(kk-H3+(DIM3)*coords[2], DIM3*dims[2])+B_ADD;
tcz = modulus(kk-H3+(DIM3)*coords[2], DIM3*dims[2])+C_ADD;
if (!per0) {
if ( ((coords[0]==0) && (ii<H1)) ||
((coords[0] == dims[0]-1) && (ii >= DIM1+H1)) ) {
tax=triple_t<USE_DOUBLE>().x();
tbx=triple_t<USE_DOUBLE>().x();
tcx=triple_t<USE_DOUBLE>().x();
}
}
if (!per1) {
if ( ((coords[1]==0) && (jj<H2)) ||
((coords[1] == dims[1]-1) && (jj >= DIM2+H2)) ) {
tay=triple_t<USE_DOUBLE>().y();
tby=triple_t<USE_DOUBLE>().y();
tcy=triple_t<USE_DOUBLE>().y();
}
}
if (!per2) {
if ( ((coords[2]==0) && (kk<H3)) ||
((coords[2] == dims[2]-1) && (kk >= DIM3+H3)) ) {
taz=triple_t<USE_DOUBLE>().z();
tbz=triple_t<USE_DOUBLE>().z();
tcz=triple_t<USE_DOUBLE>().z();
}
}
ta = triple_t<USE_DOUBLE>(tax, tay, taz).floor();
tb = triple_t<USE_DOUBLE>(tbx, tby, tbz).floor();
tc = triple_t<USE_DOUBLE>(tcx, tcy, tcz).floor();
if (a(ii,jj,kk) != ta) {
passed = false;
file << ii << ", " << jj << ", " << kk << " values found != expct: "
<< "a " << a(ii,jj,kk) << " != "
<< ta
<< "\n";
}
if (b(ii,jj,kk) != tb) {
passed = false;
file << ii << ", " << jj << ", " << kk << " values found != expct: "
<< "b " << b(ii,jj,kk) << " != "
<< tb
<< "\n";
}
if (c(ii,jj,kk) != tc) {
passed = false;
file << ii << ", " << jj << ", " << kk << " values found != expct: "
<< "c " << c(ii,jj,kk) << " != "
<< tc
<< "\n";
}
}
if (passed)
file << "RESULT: PASSED!\n";
else
file << "RESULT: FAILED!\n";
}
#ifdef _GCL_GPU_
/* device_binding added by Devendar Bureddy, OSU */
void
device_binding ()
{
int local_rank=0/*, num_local_procs*/;
int dev_count, use_dev_count, my_dev_id;
char *str;
if ((str = getenv ("MV2_COMM_WORLD_LOCAL_RANK")) != NULL)
{
local_rank = atoi (str);
printf ("MV2_COMM_WORLD_LOCAL_RANK %s\n", str);
}
if ((str = getenv ("MPISPAWN_LOCAL_NPROCS")) != NULL)
{
//num_local_procs = atoi (str);
printf ("MPISPAWN_LOCAL_NPROCS %s\n", str);
}
cudaGetDeviceCount (&dev_count);
if ((str = getenv ("NUM_GPU_DEVICES")) != NULL)
{
use_dev_count = atoi (str);
printf ("NUM_GPU_DEVICES %s\n", str);
}
else
{
use_dev_count = dev_count;
}
my_dev_id = local_rank % use_dev_count;
printf ("local rank = %d dev id = %d\n", local_rank, my_dev_id);
cudaSetDevice (my_dev_id);
}
#endif
int main(int argc, char** argv) {
#ifdef _GCL_GPU_
device_binding();
#endif
/* this example is based on MPI Cart Communicators, so we need to
initialize MPI. This can be done by GCL automatically
*/
GCL::GCL_Init(argc, argv);
/* Now let us initialize GCL itself. If MPI is not initialized at
this point, it will initialize it
*/
GCL::GCL_Init(argc, argv);
  /* Here we compute the computing grid as in many applications
*/
MPI_Comm_rank(MPI_COMM_WORLD, &pid);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
if (argc != 9) {
if (pid==0) {
std::cout << "Usage: " << argv[0] << " dim1 dim2 dim3 h datalen tloop tot_it suffix\n"
<< "where:\n"
<< "dim1 dim2 dim3 are the sizes of the 3D tiles in each process.\n"
<< "h is the halo width around the above dimensions.\n"
                << "datalen is the amount of data to allocate (on GPU) for performing the work to be overlapped.\n"
<< " There will be a kernel launch with this many threads.\n"
<< " tloop is the number of iterations each GPU thread will execute\n"
<< "tot_it is the number of halo exchanges to be executed before measuring time\n"
<< "suffix is a string to be appended to output files to be able to run multiple jobs at the same time\n"
<< std::endl;
    }
    return 1;
  }
std::cout << pid << " " << nprocs << "\n";
std::stringstream ss;
ss << pid;
std::string suffix(argv[8]);
std::string filename = "out" + ss.str() + suffix + ".txt" ;
std::cout << filename << std::endl;
std::ofstream file(filename.c_str());
filep = &file;
file << pid << " " << nprocs << "\n";
MPI_Dims_create(nprocs, 3, dims);
int period[3] = {1, 1, 1};
file << "@" << pid << "@ MPI GRID SIZE " << dims[0] << " - " << dims[1] << " - " << dims[2] << "\n";
MPI_Cart_create(MPI_COMM_WORLD, 3, dims, period, false, &CartComm);
MPI_Cart_get(CartComm, 3, dims, period, coords);
/* Each process will hold a tile of size
(DIM1+2*H)x(DIM2+2*H)x(DIM3+2*H). The DIM1xDIM2xDIM3 area inside
     the H width border is the inner region of a hypothetical stencil
     computation whose halo width is H.
*/
int DIM1=atoi(argv[1]);
int DIM2=atoi(argv[2]);
int DIM3=atoi(argv[3]);
int H1 =atoi(argv[4]);
int H2=H1;
int H3=H1;
datalen =atoi(argv[5]);
if (datalen)
thedata = new double[datalen];
tloop =atoi(argv[6]);
tot_it =atoi(argv[7]);
cudaStreamCreate ( &c_stream );
/* This example will exchange 3 data arrays at the same time with
different values.
*/
triple_t<USE_DOUBLE> *_a = new triple_t<USE_DOUBLE>[(DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)];
triple_t<USE_DOUBLE> *_b = new triple_t<USE_DOUBLE>[(DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)];
triple_t<USE_DOUBLE> *_c = new triple_t<USE_DOUBLE>[(DIM1+2*H1)*(DIM2+2*H2)*(DIM3+2*H3)];
prepare_to_do_something();
file << "Permutation 0,1,2\n";
#ifndef BENCH
#define BENCH 5
#endif
for (int i=0; i<BENCH; ++i) {
file << "run<std::ostream, 0,1,2, true, true, true>(file, DIM1, DIM2, DIM3, H1, H2, H3, _a, _b, _c)\n";
run<std::ostream, 0,1,2, true, true, true>(file, DIM1, DIM2, DIM3, H1, H2, H3, _a, _b, _c);
file.flush();
}
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}
|
628ae2d1209ed262d0b3910765b906dd243fd85f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_z
// These routines merge multiple kernels from zmergebicgstab into one
// This is the code used for the ASHES2014 paper
// "Accelerating Krylov Subspace Solvers on Graphics Processing Units".
// notice that only CSR format is supported so far.
// accelerated reduction for one vector
__global__ void
magma_zreduce_kernel_spmv1(
int Gs,
int n,
magmaDoubleComplex * vtmp,
magmaDoubleComplex * vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_Z_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_Z_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_zbicgmerge_spmv1_kernel(
int n,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * p,
magmaDoubleComplex * r,
magmaDoubleComplex * v,
magmaDoubleComplex * vtmp)
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * p[ dcolind[j] ];
v[ i ] = dot;
}
__syncthreads();
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_Z_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_zbicgstab_alphakernel(
magmaDoubleComplex * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaDoubleComplex tmp = skp[0];
skp[0] = skp[4]/tmp;
}
}
/**
Purpose
-------
Merges the first SpmV using CSR with the dot product
and the computation of alpha
Arguments
---------
@param[in]
A magma_z_matrix
system matrix
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
dp magmaDoubleComplex_ptr
input vector p
@param[in]
dr magmaDoubleComplex_ptr
input vector r
@param[in]
dv magmaDoubleComplex_ptr
output vector v
@param[in,out]
skp magmaDoubleComplex_ptr
array for parameters ( skp[0]=alpha )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
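// Sketch of the reduction cascade below (illustrative numbers): for
// n = 100000 and local_block_size = 256, the fused SpMV/dot kernel runs with
// Gs.x = 391 blocks and leaves 391 partial sums of <v,r> in d1; a single
// pass of magma_zreduce_kernel_spmv1 (one block of 128 threads) collapses
// them into one value, which magma_zcopyvector places in skp[0] and
// magma_zbicgstab_alphakernel then overwrites with alpha = skp[4] / <v,r>.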
extern "C" magma_int_t
magma_zbicgmerge_spmv1(
magma_z_matrix A,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr dp,
magmaDoubleComplex_ptr dr,
magmaDoubleComplex_ptr dv,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = local_block_size * sizeof( magmaDoubleComplex );
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
hipLaunchKernelGGL(( magma_zbicgmerge_spmv1_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, A.dval, A.drow, A.dcol, dp, dr, dv, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_zreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_zcopyvector( 1, aux1, 1, skp, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_zbicgstab_alphakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// accelerated block reduction for multiple vectors
__global__ void
magma_zreduce_kernel_spmv2(
int Gs,
int n,
magmaDoubleComplex * vtmp,
magmaDoubleComplex * vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_Z_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_Z_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
__global__ void
magma_zbicgmerge_spmv2_kernel(
int n,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * s,
magmaDoubleComplex * t,
magmaDoubleComplex * vtmp )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * s[ dcolind[j] ];
t[ i ] = dot;
}
__syncthreads();
// 2 vectors
if (i<n){
magmaDoubleComplex tmp2 = t[i];
temp[Idx] = s[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_zbicgstab_omegakernel(
magmaDoubleComplex * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
skp[2] = skp[6]/skp[7];
skp[3] = skp[4];
}
}
/**
Purpose
-------
Merges the second SpmV using CSR with the dot product
and the computation of omega
Arguments
---------
@param[in]
A magma_z_matrix
input matrix
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
ds magmaDoubleComplex_ptr
input vector s
@param[out]
dt magmaDoubleComplex_ptr
output vector t
@param[in,out]
skp magmaDoubleComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge_spmv2(
magma_z_matrix A,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr ds,
magmaDoubleComplex_ptr dt,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaDoubleComplex );
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
hipLaunchKernelGGL(( magma_zbicgmerge_spmv2_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, A.dval, A.drow, A.dcol, ds, dt, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_zreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_zcopyvector( 1, aux1, 1, skp+6, 1, queue );
magma_zcopyvector( 1, aux1+n, 1, skp+7, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_zbicgstab_omegakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_zbicgmerge_xrbeta_kernel(
int n,
magmaDoubleComplex * rr,
magmaDoubleComplex * r,
magmaDoubleComplex * p,
magmaDoubleComplex * s,
magmaDoubleComplex * t,
magmaDoubleComplex * x,
magmaDoubleComplex * skp,
magmaDoubleComplex * vtmp )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
magmaDoubleComplex alpha=skp[0];
magmaDoubleComplex omega=skp[2];
if( i<n ){
magmaDoubleComplex sl;
sl = s[i];
x[i] = x[i] + alpha * p[i] + omega * sl;
r[i] = sl - omega * t[i];
}
__syncthreads();
// 2 vectors
if (i<n){
magmaDoubleComplex tmp2 = r[i];
temp[Idx] = rr[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_zbicgstab_betakernel(
magmaDoubleComplex * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaDoubleComplex tmp1 = skp[4]/skp[3];
magmaDoubleComplex tmp2 = skp[0] / skp[2];
skp[1] = tmp1*tmp2;
}
}
/**
Purpose
-------
Merges the update of x and r with the dot products
and the computation of beta
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
rr magmaDoubleComplex_ptr
input vector rr
@param[in,out]
r magmaDoubleComplex_ptr
input/output vector r
@param[in]
p magmaDoubleComplex_ptr
input vector p
@param[in]
s magmaDoubleComplex_ptr
input vector s
@param[in]
t magmaDoubleComplex_ptr
input vector t
@param[out]
x magmaDoubleComplex_ptr
output vector x
@param[in,out]
skp magmaDoubleComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge_xrbeta(
magma_int_t n,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr rr,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr t,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaDoubleComplex );
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_zbicgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, rr, r, p, s, t, x, skp, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_zreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_zcopyvector( 1, aux1, 1, skp+4, 1, queue );
magma_zcopyvector( 1, aux1+n, 1, skp+5, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_zbicgstab_betakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
| 628ae2d1209ed262d0b3910765b906dd243fd85f.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_z
// These routines merge multiple kernels from zmergebicgstab into one
// This is the code used for the ASHES2014 paper
// "Accelerating Krylov Subspace Solvers on Graphics Processing Units".
// notice that only CSR format is supported so far.
// accelerated reduction for one vector
__global__ void
magma_zreduce_kernel_spmv1(
int Gs,
int n,
magmaDoubleComplex * vtmp,
magmaDoubleComplex * vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_Z_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_Z_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_zbicgmerge_spmv1_kernel(
int n,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * p,
magmaDoubleComplex * r,
magmaDoubleComplex * v,
magmaDoubleComplex * vtmp)
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * p[ dcolind[j] ];
v[ i ] = dot;
}
__syncthreads();
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_Z_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_zbicgstab_alphakernel(
magmaDoubleComplex * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaDoubleComplex tmp = skp[0];
skp[0] = skp[4]/tmp;
}
}
/**
Purpose
-------
Merges the first SpmV using CSR with the dot product
and the computation of alpha
Arguments
---------
@param[in]
A magma_z_matrix
system matrix
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
dp magmaDoubleComplex_ptr
input vector p
@param[in]
dr magmaDoubleComplex_ptr
input vector r
@param[out]
dv magmaDoubleComplex_ptr
output vector v
@param[in,out]
skp magmaDoubleComplex_ptr
array for parameters ( skp[0]=alpha )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge_spmv1(
magma_z_matrix A,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr dp,
magmaDoubleComplex_ptr dr,
magmaDoubleComplex_ptr dv,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = local_block_size * sizeof( magmaDoubleComplex );
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
magma_zbicgmerge_spmv1_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, A.dval, A.drow, A.dcol, dp, dr, dv, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_zreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_zcopyvector( 1, aux1, 1, skp, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_zbicgstab_alphakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
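// --- Illustrative sketch (not part of the original file): after the merged
//     call above, alpha is left in skp[0] on the device. If the host needs
//     the value (e.g. for monitoring), a one-element copy is enough; the
//     helper name is an editorial example, not a MAGMA routine.
static inline magmaDoubleComplex
example_zbicgmerge_fetch_alpha(
    magmaDoubleComplex_ptr skp,
    magma_queue_t queue )
{
    magmaDoubleComplex alpha;
    magma_zgetvector( 1, skp, 1, &alpha, 1, queue );
    return alpha;
}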
// accelerated block reduction for multiple vectors
__global__ void
magma_zreduce_kernel_spmv2(
int Gs,
int n,
magmaDoubleComplex * vtmp,
magmaDoubleComplex * vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_Z_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_Z_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
__global__ void
magma_zbicgmerge_spmv2_kernel(
int n,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * s,
magmaDoubleComplex * t,
magmaDoubleComplex * vtmp )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * s[ dcolind[j] ];
t[ i ] = dot;
}
__syncthreads();
// 2 vectors
if (i<n){
magmaDoubleComplex tmp2 = t[i];
temp[Idx] = s[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_zbicgstab_omegakernel(
magmaDoubleComplex * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
skp[2] = skp[6]/skp[7];
skp[3] = skp[4];
}
}
/**
Purpose
-------
Merges the second SpmV using CSR with the dot product
and the computation of omega
Arguments
---------
@param[in]
A magma_z_matrix
input matrix
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
ds magmaDoubleComplex_ptr
input vector s
@param[out]
dt magmaDoubleComplex_ptr
output vector t
@param[in,out]
skp magmaDoubleComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge_spmv2(
magma_z_matrix A,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr ds,
magmaDoubleComplex_ptr dt,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaDoubleComplex );
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
magma_zbicgmerge_spmv2_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, A.dval, A.drow, A.dcol, ds, dt, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_zreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_zcopyvector( 1, aux1, 1, skp+6, 1, queue );
magma_zcopyvector( 1, aux1+n, 1, skp+7, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_zbicgstab_omegakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
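// --- Illustrative sketch (not part of the original file): what the merged
//     wrapper above saves. An unmerged code path would form t = A*s first and
//     then need two separate dot products plus a division to obtain omega;
//     this assumes the standard magma_zdotc BLAS-1 wrapper is available.
static inline magmaDoubleComplex
example_zbicgmerge_unmerged_omega(
    magma_int_t n,
    magmaDoubleComplex_ptr ds,       // s
    magmaDoubleComplex_ptr dt,       // t = A*s, assumed already computed
    magma_queue_t queue )
{
    magmaDoubleComplex st = magma_zdotc( n, ds, 1, dt, 1, queue );
    magmaDoubleComplex tt = magma_zdotc( n, dt, 1, dt, 1, queue );
    return MAGMA_Z_DIV( st, tt );    // the merged path leaves this in skp[2]
}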
__global__ void
magma_zbicgmerge_xrbeta_kernel(
int n,
magmaDoubleComplex * rr,
magmaDoubleComplex * r,
magmaDoubleComplex * p,
magmaDoubleComplex * s,
magmaDoubleComplex * t,
magmaDoubleComplex * x,
magmaDoubleComplex * skp,
magmaDoubleComplex * vtmp )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
magmaDoubleComplex alpha=skp[0];
magmaDoubleComplex omega=skp[2];
if( i<n ){
magmaDoubleComplex sl;
sl = s[i];
x[i] = x[i] + alpha * p[i] + omega * sl;
r[i] = sl - omega * t[i];
}
__syncthreads();
// 2 vectors
if (i<n){
magmaDoubleComplex tmp2 = r[i];
temp[Idx] = rr[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_zbicgstab_betakernel(
magmaDoubleComplex * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaDoubleComplex tmp1 = skp[4]/skp[3];
magmaDoubleComplex tmp2 = skp[0] / skp[2];
skp[1] = tmp1*tmp2;
}
}
/**
Purpose
-------
Merges the update of x and r with the dot products
and the computation of beta
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
rr magmaDoubleComplex_ptr
input vector rr
@param[in,out]
r magmaDoubleComplex_ptr
input/output vector r
@param[in]
p magmaDoubleComplex_ptr
input vector p
@param[in]
s magmaDoubleComplex_ptr
input vector s
@param[in]
t magmaDoubleComplex_ptr
input vector t
@param[out]
x magmaDoubleComplex_ptr
output vector x
@param[in,out]
skp magmaDoubleComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge_xrbeta(
magma_int_t n,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr rr,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr t,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaDoubleComplex );
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_zbicgmerge_xrbeta_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, rr, r, p, s, t, x, skp, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_zreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_zcopyvector( 1, aux1, 1, skp+4, 1, queue );
magma_zcopyvector( 1, aux1+n, 1, skp+5, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_zbicgstab_betakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
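// --- Illustrative sketch (not part of the original file): how one iteration
//     of the merged BiCGSTAB chains the three wrappers defined above. The
//     vectors, the workspace d1/d2 and the parameter array skp are assumed to
//     be allocated and initialized by the calling solver, and the update
//     s = r - alpha*v is assumed to happen between the two SpMV calls.
static inline void
example_zbicgmerge_iteration(
    magma_z_matrix A,
    magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2,
    magmaDoubleComplex_ptr rr, magmaDoubleComplex_ptr r,
    magmaDoubleComplex_ptr p,  magmaDoubleComplex_ptr v,
    magmaDoubleComplex_ptr s,  magmaDoubleComplex_ptr t,
    magmaDoubleComplex_ptr x,  magmaDoubleComplex_ptr skp,
    magma_queue_t queue )
{
    // v = A*p, dot product and alpha (left in skp[0])
    magma_zbicgmerge_spmv1( A, d1, d2, p, r, v, skp, queue );
    // ... s = r - alpha*v is formed here by the caller ...
    // t = A*s, dot products and omega (left in skp[2])
    magma_zbicgmerge_spmv2( A, d1, d2, s, t, skp, queue );
    // x and r update, dot products and beta (left in skp[1])
    magma_zbicgmerge_xrbeta( A.num_rows, d1, d2, rr, r, p, s, t, x, skp, queue );
}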
|
1c5b3459ad57324d6bb6e0b8dfb3d7ae85b163bb.hip | // !!! This is a file automatically generated by hipify!!!
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
//#include <hip/hip_runtime.h>
#include <iostream>
#include <math.h>
//#include "hip/hip_runtime_api.h"
#include "commonBMT.h"
#include "cudaBMTKernel_MultiDim.cuh"
using namespace std;
/**
* @Operation mode: the program can either operate in master or worker mode.
* The master is created through the job submission process
* by the user whereas the workers are in turn created by
* the master. There will always be only one master per
* job submission.
*/
#define OP_STR_WORKER "worker"
#define OP_MODE_MASTER 0x0a
#define OP_MODE_WORKER 0x0b
int op_mode = OP_MODE_MASTER;
/**
* @File message
*/
#define MODE_NORM 0x0000
#define MODE_HELP 0x0001
#define DSET_XS 0x0000
#define DSET_S 0x0001
#define DSET_M 0x0002
#define DSET_L 0x0003
#define DSET_XL 0x0004
#define E_SUCCESS 0x0000
#define E_NO_ARG 0x0001
#define E_UNKNOWN 0x0002
#define E_INV_PE 0x0003
#define E_INV_PEV 0x0004
#define E_INV_DS 0x0005
#define E_INV_DSV 0x0006
/**
* @Common MPI attributes
*/
#define MPI_NN_ROOT 0
#ifndef _DOUBLE_PRECISION
#define MPI_PRECISION MPI_FLOAT
#else //_DOUBLE_PRECISION
#define MPI_PRECISION MPI_DOUBLE
#endif //_DOUBLE_PRECISION
int parent_rank;
int rank;
int comm_size;
bool verify = false;
/**
* @Process attributes
*/
int max_pe;
int pe_node;
int pe_dim;
int pe_per_node;
int wid;
int ret;
int iter;
/**
* @Data type
*/
#ifndef _DOUBLE_PRECISION
#define PRECISION float
#else
#define PRECISION double
#endif
#ifdef _DOUBLE_PRECISION
#define MPI_PRECISION MPI_DOUBLE
#else
#define MPI_PRECISION MPI_FLOAT
#endif
PRECISION **** a, **** b, **** c, *** p;
PRECISION *** wrk1, *** wrk2, *** bnd;
Matrix * pa, * pb, * pc, * pp, * pwrk1, * pwrk2, *pbnd;
int mx, my, mz, imax, jmax, kmax, it;
PRECISION omega = 0.8;
PRECISION wgosa, gosa;
/**
* @himeno config
*/
int gargc;
char ** gargv;
int mode = MODE_NORM;
BMT_Config config;
//MPI
typedef struct {
int l;
int r;
} Neighbor;
typedef struct {
Neighbor x;
Neighbor y;
Neighbor z;
} Cart_Neighbor;
int numpes, peid, cartid[3];
MPI_Comm comm_cart;
MPI_Datatype jk_plane, ik_plane, ij_plane;
Cart_Neighbor nb;
#define NUM_WORKER_PROCS 1
/**
* @Init parameter
*/
inline int SetPreference(int argc, char ** argv) {
#define IS_OPTION(str) (!strcmp(argv[idx], str))
gargc = argc; gargv = argv;
int idx = 1;
while (idx < argc) {
if IS_OPTION("-pe") {
if ((idx + 3) >= argc)
return E_INV_PE;
int temp = atoi(argv[++idx]);
if (temp < 1)
return E_INV_PEV;
config.ndx0 = temp;
temp = atoi(argv[++idx]);
if (temp < 1)
return E_INV_PEV;
config.ndy0 = temp;
temp = atoi(argv[++idx]);
if (temp < 1)
return E_INV_PEV;
config.ndz0 = temp;
}
else if (IS_OPTION("-ds") || IS_OPTION("--dataset-size")) {
if ((idx + 1) >= argc)
return E_INV_DS;
idx++;
if IS_OPTION("xs") {
config.mx0 = 33;
config.my0 = 33;
config.mz0 = 65;
}
else if IS_OPTION("s") {
config.mx0 = 65;
config.my0 = 65;
config.mz0 = 129;
}
else if IS_OPTION("m") {
config.mx0 = 129;
config.my0 = 129;
config.mz0 = 257;
}
else if IS_OPTION("l") {
config.mx0 = 257;
config.my0 = 257;
config.mz0 = 513;
}
else if IS_OPTION("xl") {
config.mx0 = 513;
config.my0 = 513;
config.mz0 = 1025;
}
else {
return E_INV_DSV;
}
}
else if (IS_OPTION("-h") || IS_OPTION("--help")) {
mode = MODE_HELP;
return E_SUCCESS;
}
idx++;
}
return E_SUCCESS;
#undef IS_OPTION
}
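// --- Illustrative sketch (not part of the original source): how the option
//     parser above is expected to be driven. The argv contents here are
//     hypothetical; a 2 x 2 x 4 PE grid with the "m" dataset yields
//     config = { ndx0=2, ndy0=2, ndz0=4, mx0=129, my0=129, mz0=257 }.
static inline void ExamplePreference() {
    const char * example_argv[] = { "bmt", "-pe", "2", "2", "4", "-ds", "m" };
    if (SetPreference(7, const_cast<char **>(example_argv)) == E_SUCCESS)
        cout << "PE grid: " << config.ndx0 << "x" << config.ndy0 << "x"
             << config.ndz0 << ", mx0=" << config.mx0 << endl;
}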
/**
* @message functions
*/
const char * e_msg[] = {
"No error",
"No arguments specified.",
"Unrecognized arguments presented.",
"Requires three PE numbers along the dimensions.",
"Invalid PE numbers specified.",
"Requires the size of dataset (xs, s, m, l, xl).",
"Unrecognized dataset size"
};
const char h_msg[] = {"\
Usage: %s [OPTIONS] [...] \n\
Options are available in both short and long format: \n\n\
\t-pe [pe_x pe_y pe_z] Specify numbers of PEs along dimensions \n\
\t-ds, --dataset-size [xs|s|m|l|xl] Select the size of the dataset \n\
\t-h, --help Show this help message. \n\
"};
/**
* @Error check functions list
*/
inline void print_help() {
char msg[512];
sprintf(msg, h_msg, gargv[0]);
cout << endl << msg << endl << endl;
}
inline void CheckError(int rc) {
if (rc != E_SUCCESS) {
cerr << endl << "Error: " << e_msg[rc] << endl;
if (rc == E_NO_ARG)
print_help();
else
cout << endl;
exit(rc);
}
}
inline void CheckCUDAError (hipError_t ce) {
if (ce != hipSuccess) {
cout << "CUDA_ERROR: " << hipGetErrorString(ce) << endl;
exit(0);
}
}
/**
* @Himeno initialize functions
*
*/
//Work division and assignment for PEs
int bmtInitMax(int lmx, int lmy, int lmz, int peid) {
int * mx1, * my1, * mz1;
int * mx2, * my2, * mz2;
int tmp;
mx1 = new int [config.mx0 + 1];
my1 = new int [config.my0 + 1];
mz1 = new int [config.mz0 + 1];
mx2 = new int [config.mx0 + 1];
my2 = new int [config.my0 + 1];
mz2 = new int [config.mz0 + 1];
tmp = mx / config.ndx0;
mx1[0] = 0;
for (int i=1;i<=config.ndx0;i++) {
if (i <= mx % config.ndx0)
mx1[i] = mx1[i - 1] + tmp + 1;
else
mx1[i] = mx1[i - 1] + tmp;
}
tmp = my / config.ndy0;
my1[0] = 0;
for (int i=1;i<=config.ndy0;i++) {
if (i <= my % config.ndy0)
my1[i] = my1[i - 1] + tmp + 1;
else
my1[i] = my1[i - 1] + tmp;
}
tmp = mz / config.ndz0;
mz1[0] = 0;
for (int i=1;i<=config.ndz0;i++) {
if (i <= mz % config.ndz0)
mz1[i] = mz1[i - 1] + tmp + 1;
else
mz1[i] = mz1[i - 1] + tmp;
}
//************************************************************************
for(int i=0;i<config.ndx0;i++) {
mx2[i] = mx1[i+1] - mx1[i];
if(i != 0)
mx2[i] = mx2[i] + 1;
if(i != config.ndx0-1)
mx2[i] = mx2[i] + 1;
}
for(int i=0;i<config.ndy0;i++) {
my2[i] = my1[i+1] - my1[i];
if(i != 0)
my2[i] = my2[i] + 1;
if(i != config.ndy0-1)
my2[i] = my2[i] + 1;
}
for(int i=0;i<config.ndz0;i++) {
mz2[i] = mz1[i+1] - mz1[i];
if(i != 0)
mz2[i] = mz2[i] + 1;
if(i != config.ndz0-1)
mz2[i] = mz2[i] + 1;
}
//************************************************************************
imax = mx2[0];
jmax = my2[0];
kmax = mz2[peid];
delete [] mz2;
delete [] my2;
delete [] mx2;
delete [] mz1;
delete [] my1;
delete [] mx1;
return 0;
}
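// --- Illustrative sketch (not part of the original source): a worked use of
//     the decomposition helper above. The sizes below assume the "m" dataset
//     split only along z over 4 PEs.
static inline void ExampleInitMax() {
    config.mx0 = 129; config.my0 = 129; config.mz0 = 257;
    config.ndx0 = 1; config.ndy0 = 1; config.ndz0 = 4;
    mx = config.mx0 - 1; my = config.my0 - 1; mz = config.mz0 - 1;
    bmtInitMax(mx, my, mz, /*peid=*/0);
    // PE 0 now owns imax = 128, jmax = 128 and kmax = 65: a quarter of the
    // z range (64 planes) plus one halo plane on its interior face.
}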
/**
* @Routines for the master and workers: See the implementations below.
*
*/
int MasterRoutine(int argc, char ** argv);
int WorkerRoutine(int argc, char ** argv);
int SetOperationMode(int argc, char ** argv) {
bool matched = false;
for (int idx=0;idx<argc;idx++) {
if (!strcmp(argv[idx], OP_STR_WORKER)) {
op_mode = OP_MODE_WORKER;
matched = true;
parent_rank = atoi(argv[idx + 1]);
wid = atoi(argv[idx + 2]);
iter = atoi(argv[idx + 3]);
}
if (!strcmp(argv[idx], "-pm") && op_mode == OP_MODE_MASTER) {
max_pe = atoi(argv[idx+1]);
}
if (!strcmp(argv[idx], "-pn") && op_mode == OP_MODE_MASTER) {
pe_node = atoi(argv[idx+1]);
}
if (!strcmp(argv[idx], "-pp") && op_mode == OP_MODE_MASTER) {
pe_per_node = atoi(argv[idx+1]);
}
if (!strcmp(argv[idx], "-it")) {
iter = atoi(argv[idx+1]);
}
if (!strcmp(argv[idx], "-v") && op_mode == OP_MODE_MASTER) {
verify = true;
}
}
if (!matched)
op_mode = OP_MODE_MASTER;
return 0;
}
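// --- Illustrative sketch (not part of the original source): the argv layout
//     the spawned workers see (MPI prepends the program name to the c_argv
//     built in MasterRoutine) and what the parser above extracts from it.
//     The numbers are hypothetical.
static inline void ExampleWorkerArgs() {
    const char * example_argv[] = { "bmt", "worker", "3", "8", "10" };
    SetOperationMode(5, const_cast<char **>(example_argv));
    // op_mode == OP_MODE_WORKER, parent_rank == 3, wid == 8, iter == 10
}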
int main(int argc, char ** argv) {
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
SetOperationMode(argc, argv);
switch(op_mode) {
case OP_MODE_MASTER:
ret = MasterRoutine(argc, argv);
break;
case OP_MODE_WORKER:
ret = WorkerRoutine(argc, argv);
break;
}
return 0;
}
/**
* @Routine for the master:
*/
int MasterRoutine(int argc, char ** argv) {
MPI_Barrier(MPI_COMM_WORLD);
char rank_str[8], wid_str[8], iter_str[8];
snprintf(rank_str, 8, "%d", rank);
// Control PE: the master PE with rank 0
int count = 0;
int count_buf[3];
int child_buf[2];
int control_buf[4];
int compute_info[7];
PRECISION gosa_sum[pe_per_node];
bool end = false;
int rev_id = 0;
timeval t_b, t_e;
double t_p;
MPI_Request request;
MPI_Status status;
#if 0
// one-sided communication buffer create
MPI_Win win;
MPI_Win_create(control_buf, pe_node, sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);
#endif
// Initialize data info
CheckError( SetPreference(argc, argv));
config.mimax = (config.ndx0 == 1) ?
config.mx0 : (config.mx0 / config.ndx0) + 3;
config.mjmax = (config.ndy0 == 1) ?
config.my0 : (config.my0 / config.ndy0) + 3;
config.mkmax = (config.ndz0 == 1) ?
config.mz0 : (config.mz0 / config.ndz0) + 3;
mx = config.mx0 - 1; my = config.my0 - 1; mz = config.mz0 - 1;
if (mode == MODE_HELP) {
print_help();
exit(0);
}
memset(count_buf, 0, sizeof(count_buf));
memset(child_buf, 0, sizeof(child_buf));
memset(control_buf, 0, sizeof(control_buf));
memset(compute_info, 0, sizeof(compute_info));
if (rank == 0) {
cout << "++++++++" << endl << "PE_per_node" << pe_per_node << endl
<< "PE_Max: " << max_pe << endl
<< "PE_Node: " << pe_node << endl
<< "PE_iter: " << iter << endl
<< "++++++++" << endl;
}
if (rank == 0 ) {
// Control PE (MPE)
int idx;
gettimeofday(&t_b, NULL);
// control parameter
for (idx=0; idx<iter; idx++) {
count = 0;
memset(count_buf, 0, sizeof(count_buf));
memset(child_buf, 0, sizeof(child_buf));
memset(control_buf, 0, sizeof(control_buf));
memset(compute_info, 0, sizeof(compute_info));
memset(gosa_sum, 0, sizeof(gosa_sum));
wgosa = 0.0;
gosa = 0.0;
while (count < max_pe) {
if (idx == 0)
MPI_Recv(control_buf, 4, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
else {
count_buf[1] = 1;
for (int j=1; j<pe_node; j++)
MPI_Send(count_buf, 3, MPI_INT, j, 0, MPI_COMM_WORLD);
}
#if 0
MPI_Win_fence(0, win);
cout << "==============" << endl << "Show Status: " << local_buf[i] << endl;
cout << "==============" << endl << endl;
#endif
if (idx == 0) {
rev_id = status.MPI_SOURCE;
if (rev_id == -1) {
cout << "Error: Master PE Monitor Receiving Error!" << endl;
exit(0);
}
// control_buf[0] indicates if task is completed by master PE, 2 yes, 1 no
if (control_buf[0] == 1) {
// count_buf[1] indicates control PE accept a new request of child PE
count_buf[1] = 1;
// count_buf[2] indicates the WID for new child PE
count_buf[2] = count;
// send WID
MPI_Send(count_buf, 3, MPI_INT, rev_id, 0, MPI_COMM_WORLD);
}
wid = count;
count += pe_per_node;
control_buf[0] = 0;
control_buf[1] = 0;
control_buf[2] = 0;
}
if (idx > 0)
break;
}
// receive the final dataset before ending
for (int i=1; i< pe_node; i++) {
if (idx == 0) {
// receive control_buf to have final data WID
MPI_Recv(control_buf, 4, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
count_buf[1] = 2;
// send end flag to WPEs
MPI_Send(count_buf, 3, MPI_INT, i, 0, MPI_COMM_WORLD);
}
//Receive result data from WPEs
MPI_Recv(&wgosa, 1, MPI_PRECISION, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
gosa += wgosa;
}
//cout << "secs: " << sec << "/" << duration << "\xd";
cout << idx << ":" << gosa << endl;
}
} else {
MPI_Comm children_comm[18];
int wid_buf[8];
int comm_count = 0;
PRECISION tmp_gosa = 0.0;
int idx;
for (idx=0; idx<iter; idx++) {
memset(count_buf, 0, sizeof(count_buf));
memset(child_buf, 0, sizeof(child_buf));
memset(control_buf, 0, sizeof(control_buf));
memset(compute_info, 0, sizeof(compute_info));
memset(gosa_sum, 0, sizeof(gosa_sum));
tmp_gosa = 0.0;
end = false;
while (!end) {
control_buf[0] = 1;
if (idx == 0) {
// send request for WID
MPI_Send(control_buf, 4, MPI_INT, 0, 0, MPI_COMM_WORLD);
// receive WID
MPI_Recv(count_buf, 3, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
else {
MPI_Recv(count_buf, 3, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
if (count_buf[1] == 1) {
if (idx == 0) {
// set WID
wid = count_buf[2];
snprintf(wid_str, 8, "%d", count_buf[2]);
snprintf(iter_str, 8, "%d", iter);
char * c_argv[] = {const_cast<char *>("worker"), rank_str, wid_str, iter_str, NULL};
//Each of the processes in the master-job spawns a worker-job
//consisting of NUM_WORKER_PROCS processes.
int pe_spn;
if (wid+pe_per_node > max_pe)
pe_spn = max_pe - wid;
else
pe_spn = pe_per_node;
// host info
char hostname[256];
gethostname(hostname, 256);
#if 1
int offset=0;
for (int i=0;i<strlen(hostname);i++) {
if (hostname[i] == '.') {
offset = i;
break;
}
}
hostname[offset] = '\0';
#endif
MPI_Info spawn_info;
MPI_Info_create(&spawn_info);
MPI_Info_set(spawn_info, "host", hostname);
printf("pe: %d, pe_spn: %d, wid: %d\n", rank, pe_spn, wid);
MPI_Comm_spawn(argv[0], c_argv, pe_spn, spawn_info/*MPI_INFO_NULL*/,
0, MPI_COMM_SELF, &children_comm[comm_count], MPI_ERRCODES_IGNORE);
}
// send dataset and PE info
compute_info[0] = config.ndx0;
compute_info[1] = config.ndy0;
compute_info[2] = config.ndz0;
compute_info[3] = config.mx0;
compute_info[4] = config.my0;
compute_info[5] = config.mz0;
compute_info[6] = gosa;
if (idx == 0) {
// Enable children PE to begin new iteration
for (int k=0; k<pe_per_node; k++) {
if (wid+k < max_pe) {
// send dataset to children PEs
MPI_Send(compute_info, 7, MPI_INT, k, 0, children_comm[comm_count]);
}
}
wid_buf[comm_count] = wid;
for (int k=0; k<pe_per_node; k++) {
int tmpid = wid + k;
if (tmpid < max_pe) {
//Receive the message from all the corresponding workers.
MPI_Recv(&wgosa, 1, MPI_PRECISION, k, 0, children_comm[comm_count], MPI_STATUS_IGNORE);
tmp_gosa += wgosa;
}
}
comm_count++;
control_buf[1] = wid;
control_buf[2] = 1;
} else {
for (int j=0; j<comm_count; j++) {
for (int k=0; k<pe_per_node; k++) {
int tmpid = wid_buf[j] + k;
if (tmpid < max_pe) {
// send dataset to children PEs
MPI_Send(compute_info, 7, MPI_INT, k, 0, children_comm[j]);
}
}
}
for (int j=0; j<comm_count; j++) {
for (int k=0; k<pe_per_node; k++) {
int tmpid = wid_buf[j] + k;
if (tmpid < max_pe) {
//Receive the message from all the corresponding workers.
MPI_Recv(&wgosa, 1, MPI_PRECISION, k, 0, children_comm[j], MPI_STATUS_IGNORE);
tmp_gosa += wgosa;
}
}
}
count_buf[1] =2;
}
} else if (count_buf[1] == 2) {
// Send result dataset
MPI_Send(&tmp_gosa, 1, MPI_PRECISION, 0, 0, MPI_COMM_WORLD);
control_buf[2] = 0;
end = true;
}//elseif
printf("Process: %d...begin\n", rank);
if (idx > 0) {
MPI_Send(&tmp_gosa, 1, MPI_PRECISION, 0, 0, MPI_COMM_WORLD);
end = true;
}//endif
printf("Process: %d...\n", rank);
}//while
}
}
MPI_Barrier(MPI_COMM_WORLD);
if (rank == 0) {
gettimeofday(&t_e, NULL);
t_p = (t_e.tv_sec + t_e.tv_usec * 1e-6) - (t_b.tv_sec + t_b.tv_usec * 1e-6);
printf("Time: %f\n", t_p);
}
cout << "End of Program...... /" << rank << endl;
MPI_Finalize();
return 0;
}
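// --- Minimal sketch (illustrative only, not part of the original source) of
//     the exchange MasterRoutine performs with one group of spawned workers:
//     every worker receives the 7-int compute_info block and answers with a
//     single PRECISION partial residual, which the master accumulates.
static inline PRECISION ExampleSpawnHandshake(MPI_Comm children, int nworkers,
                                              int compute_info[7]) {
    PRECISION sum = 0.0, part = 0.0;
    for (int k = 0; k < nworkers; k++)
        MPI_Send(compute_info, 7, MPI_INT, k, 0, children);
    for (int k = 0; k < nworkers; k++) {
        MPI_Recv(&part, 1, MPI_PRECISION, k, 0, children, MPI_STATUS_IGNORE);
        sum += part;
    }
    return sum;
}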
/**
* @Routine for the workers
*/
int WorkerRoutine(int argc, char ** argv) {
MPI_Comm parent_comm;
int parent_size;
int task_buf[2];
int compute_info[7];
MPI_Comm_get_parent(&parent_comm);
if (parent_comm == MPI_COMM_NULL)
return -1;
//Attention!: The size of the inter-communicator obtained through the
// MPI_Comm_remote_size() will always be '1' since a number
// of NUM_WORKER_PROCS child processes are spawned by each
// of the master processes. Therefore, each group of the
// NUM_WORKER_PROCS child processes recognizes only their
// corresponding master process in the inter-communicator.
MPI_Comm_remote_size(parent_comm, &parent_size);
printf("\tchildren pe: %d, iter: %d\n", rank, iter);
int idx;
for (idx=0; idx<iter; idx++) {
// receive matrix info
MPI_Recv(compute_info, 7, MPI_INT, 0, 0, parent_comm, MPI_STATUS_IGNORE);
if (idx == 0) {
config.ndx0 = compute_info[0];
config.ndy0 = compute_info[1];
config.ndz0 = compute_info[2];
config.mx0 = compute_info[3];
config.my0 = compute_info[4];
config.mz0 = compute_info[5];
config.mimax = (config.ndx0 == 1) ?
config.mx0 : (config.mx0 / config.ndx0) + 3;
config.mjmax = (config.ndy0 == 1) ?
config.my0 : (config.my0 / config.ndy0) + 3;
config.mkmax = (config.ndz0 == 1) ?
config.mz0 : (config.mz0 / config.ndz0) + 3;
mx = config.mx0 - 1; my = config.my0 - 1; mz = config.mz0 - 1;
wid = wid + rank;
// data initialize
bmtInitMax(mx, my, mz, wid);
pa = new Matrix(4, config.mimax, config.mjmax, config.mkmax);
pb = new Matrix(3, config.mimax, config.mjmax, config.mkmax);
pc = new Matrix(3, config.mimax, config.mjmax, config.mkmax);
pp = new Matrix(config.mimax, config.mjmax, config.mkmax);
pwrk1 = new Matrix(config.mimax, config.mjmax, config.mkmax);
pwrk2 = new Matrix(config.mimax, config.mjmax, config.mkmax);
pbnd = new Matrix(config.mimax, config.mjmax, config.mkmax);
bmtInitMt(
*pa, *pb, *pc,
*pp, *pwrk1, *pwrk2,
*pbnd, mx, it,
config.mimax, config.mjmax, config.mkmax,
imax, jmax, kmax);
hipError_t ce = bmtInitDeviceMemory(
pa, pb, pc,
pp, pwrk1, pwrk2,
pbnd, rank);
if (ce != hipSuccess)
cerr << "Error: " << hipGetErrorString(ce) << endl;
a = pa->GetPtr4D();
b = pb->GetPtr4D();
c = pc->GetPtr4D();
p = pp->GetPtr3D();
wrk1 = pwrk1->GetPtr3D();
wrk2 = pwrk2->GetPtr3D();
bnd = pbnd->GetPtr3D();
}
int deviceCnt = 0;
CheckCUDAError( hipGetDeviceCount(&deviceCnt));
// CheckCUDAError( hipSetDevice(rank % deviceCnt));
char hostname[128];
gethostname(hostname, 128);
cout << "\tPE:" << wid << " / " << parent_rank << " iter: " <<iter << " ["
<< hostname << "]: RUN: Device[" << rank%deviceCnt << "]" << endl;
/**********************************************************************
* Launch Kernel
*********************************************************************/
CheckCUDAError( bmtCudaJacobi(&wgosa, pp, imax, jmax, kmax));
#if 0
struct timeval time_b, time_e;
gettimeofday(&time_b, NULL);
gettimeofday(&time_e, NULL);
cout << "Kernel Time: " << (time_e.tv_usec - time_b.tv_usec)*1e-6 + ((double)time_e.tv_sec - (double)time_b.tv_sec) << endl;
#endif
/**********************************************************************
* Finalize
*********************************************************************/
#if 0
char send_buf[256];
snprintf(send_buf, 256, "I am rank %d, the worker of rank %d, own rank: %d",
wid, parent_rank, rank);
printf( "Master(%d): %s \n", parent_rank, send_buf);
#endif
task_buf[0] = 1;
MPI_Send(&wgosa, 1, MPI_PRECISION, 0, 0, parent_comm);
}
MPI_Finalize();
return 0;
}
| 1c5b3459ad57324d6bb6e0b8dfb3d7ae85b163bb.cu | #include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
//#include <cuda.h>
#include <iostream>
#include <math.h>
//#include "cuda_runtime_api.h"
#include "commonBMT.h"
#include "cudaBMTKernel_MultiDim.cuh"
using namespace std;
/**
* @Operation mode: the program can either operate in master or worker mode.
* The master is created through the job submission process
* by the user whereas the workers are in turn created by
* the master. There will always be only one master per
* job submission.
*/
#define OP_STR_WORKER "worker"
#define OP_MODE_MASTER 0x0a
#define OP_MODE_WORKER 0x0b
int op_mode = OP_MODE_MASTER;
/**
* @File message
*/
#define MODE_NORM 0x0000
#define MODE_HELP 0x0001
#define DSET_XS 0x0000
#define DSET_S 0x0001
#define DSET_M 0x0002
#define DSET_L 0x0003
#define DSET_XL 0x0004
#define E_SUCCESS 0x0000
#define E_NO_ARG 0x0001
#define E_UNKNOWN 0x0002
#define E_INV_PE 0x0003
#define E_INV_PEV 0x0004
#define E_INV_DS 0x0005
#define E_INV_DSV 0x0006
/**
* @Common MPI attributes
*/
#define MPI_NN_ROOT 0
#ifndef _DOUBLE_PRECISION
#define MPI_PRECISION MPI_FLOAT
#else //_DOUBLE_PRECISION
#define MPI_PRECISION MPI_DOUBLE
#endif //_DOUBLE_PRECISION
int parent_rank;
int rank;
int comm_size;
bool verify = false;
/**
* @Process attributes
*/
int max_pe;
int pe_node;
int pe_dim;
int pe_per_node;
int wid;
int ret;
int iter;
/**
* @Data type
*/
#ifndef _DOUBLE_PRECISION
#define PRECISION float
#else
#define PRECISION double
#endif
#ifdef _DOUBLE_PRECISION
#define MPI_PRECISION MPI_DOUBLE
#else
#define MPI_PRECISION MPI_FLOAT
#endif
PRECISION **** a, **** b, **** c, *** p;
PRECISION *** wrk1, *** wrk2, *** bnd;
Matrix * pa, * pb, * pc, * pp, * pwrk1, * pwrk2, *pbnd;
int mx, my, mz, imax, jmax, kmax, it;
PRECISION omega = 0.8;
PRECISION wgosa, gosa;
/**
* @himeno config
*/
int gargc;
char ** gargv;
int mode = MODE_NORM;
BMT_Config config;
//MPI
typedef struct {
int l;
int r;
} Neighbor;
typedef struct {
Neighbor x;
Neighbor y;
Neighbor z;
} Cart_Neighbor;
int numpes, peid, cartid[3];
MPI_Comm comm_cart;
MPI_Datatype jk_plane, ik_plane, ij_plane;
Cart_Neighbor nb;
#define NUM_WORKER_PROCS 1
/**
* @Init parameter
*/
inline int SetPreference(int argc, char ** argv) {
#define IS_OPTION(str) (!strcmp(argv[idx], str))
gargc = argc; gargv = argv;
int idx = 1;
while (idx < argc) {
if IS_OPTION("-pe") {
if ((idx + 3) >= argc)
return E_INV_PE;
int temp = atoi(argv[++idx]);
if (temp < 1)
return E_INV_PEV;
config.ndx0 = temp;
temp = atoi(argv[++idx]);
if (temp < 1)
return E_INV_PEV;
config.ndy0 = temp;
temp = atoi(argv[++idx]);
if (temp < 1)
return E_INV_PEV;
config.ndz0 = temp;
}
else if (IS_OPTION("-ds") || IS_OPTION("--dataset-size")) {
if ((idx + 1) >= argc)
return E_INV_DS;
idx++;
if IS_OPTION("xs") {
config.mx0 = 33;
config.my0 = 33;
config.mz0 = 65;
}
else if IS_OPTION("s") {
config.mx0 = 65;
config.my0 = 65;
config.mz0 = 129;
}
else if IS_OPTION("m") {
config.mx0 = 129;
config.my0 = 129;
config.mz0 = 257;
}
else if IS_OPTION("l") {
config.mx0 = 257;
config.my0 = 257;
config.mz0 = 513;
}
else if IS_OPTION("xl") {
config.mx0 = 513;
config.my0 = 513;
config.mz0 = 1025;
}
else {
return E_INV_DSV;
}
}
else if (IS_OPTION("-h") || IS_OPTION("--help")) {
mode = MODE_HELP;
return E_SUCCESS;
}
idx++;
}
return E_SUCCESS;
#undef IS_OPTION
}
/**
* @message functions
*/
const char * e_msg[] = {
"No error",
"No arguments specified.",
"Unrecognized arguments presented.",
"Requires three PE numbers along the dimensions.",
"Invalid PE numbers specified.",
"Requires the size of dataset (xs, s, m, l, xl).",
"Unrecognized dataset size"
};
const char h_msg[] = {"\
Usage: %s [OPTIONS] [...] \n\
Options are available in both short and long format: \n\n\
\t-pe [pe_x pe_y pe_z] Specify numbers of PEs along dimensions \n\
\t-ds, --dataset-size [xs|s|m|l|xl] Select the size of the dataset \n\
\t-h, --help Show this help message. \n\
"};
/**
* @Error check functions list
*/
inline void print_help() {
char msg[512];
sprintf(msg, h_msg, gargv[0]);
cout << endl << msg << endl << endl;
}
inline void CheckError(int rc) {
if (rc != E_SUCCESS) {
cerr << endl << "Error: " << e_msg[rc] << endl;
if (rc == E_NO_ARG)
print_help();
else
cout << endl;
exit(rc);
}
}
inline void CheckCUDAError (cudaError_t ce) {
if (ce != cudaSuccess) {
cout << "CUDA_ERROR: " << cudaGetErrorString(ce) << endl;
exit(0);
}
}
/**
* @Himeno initialize functions
*
*/
//Work division and assignment for PEs
int bmtInitMax(int lmx, int lmy, int lmz, int peid) {
int * mx1, * my1, * mz1;
int * mx2, * my2, * mz2;
int tmp;
mx1 = new int [config.mx0 + 1];
my1 = new int [config.my0 + 1];
mz1 = new int [config.mz0 + 1];
mx2 = new int [config.mx0 + 1];
my2 = new int [config.my0 + 1];
mz2 = new int [config.mz0 + 1];
tmp = mx / config.ndx0;
mx1[0] = 0;
for (int i=1;i<=config.ndx0;i++) {
if (i <= mx % config.ndx0)
mx1[i] = mx1[i - 1] + tmp + 1;
else
mx1[i] = mx1[i - 1] + tmp;
}
tmp = my / config.ndy0;
my1[0] = 0;
for (int i=1;i<=config.ndy0;i++) {
if (i <= my % config.ndy0)
my1[i] = my1[i - 1] + tmp + 1;
else
my1[i] = my1[i - 1] + tmp;
}
tmp = mz / config.ndz0;
mz1[0] = 0;
for (int i=1;i<=config.ndz0;i++) {
if (i <= mz % config.ndz0)
mz1[i] = mz1[i - 1] + tmp + 1;
else
mz1[i] = mz1[i - 1] + tmp;
}
//************************************************************************
for(int i=0;i<config.ndx0;i++) {
mx2[i] = mx1[i+1] - mx1[i];
if(i != 0)
mx2[i] = mx2[i] + 1;
if(i != config.ndx0-1)
mx2[i] = mx2[i] + 1;
}
for(int i=0;i<config.ndy0;i++) {
my2[i] = my1[i+1] - my1[i];
if(i != 0)
my2[i] = my2[i] + 1;
if(i != config.ndy0-1)
my2[i] = my2[i] + 1;
}
for(int i=0;i<config.ndz0;i++) {
mz2[i] = mz1[i+1] - mz1[i];
if(i != 0)
mz2[i] = mz2[i] + 1;
if(i != config.ndz0-1)
mz2[i] = mz2[i] + 1;
}
//************************************************************************
imax = mx2[0];
jmax = my2[0];
kmax = mz2[peid];
delete [] mz2;
delete [] my2;
delete [] mx2;
delete [] mz1;
delete [] my1;
delete [] mx1;
return 0;
}
/**
* @Routines for the master and workers: See the implementations below.
*
*/
int MasterRoutine(int argc, char ** argv);
int WorkerRoutine(int argc, char ** argv);
int SetOperationMode(int argc, char ** argv) {
bool matched = false;
for (int idx=0;idx<argc;idx++) {
if (!strcmp(argv[idx], OP_STR_WORKER)) {
op_mode = OP_MODE_WORKER;
matched = true;
parent_rank = atoi(argv[idx + 1]);
wid = atoi(argv[idx + 2]);
iter = atoi(argv[idx + 3]);
}
if (!strcmp(argv[idx], "-pm") && op_mode == OP_MODE_MASTER) {
max_pe = atoi(argv[idx+1]);
}
if (!strcmp(argv[idx], "-pn") && op_mode == OP_MODE_MASTER) {
pe_node = atoi(argv[idx+1]);
}
if (!strcmp(argv[idx], "-pp") && op_mode == OP_MODE_MASTER) {
pe_per_node = atoi(argv[idx+1]);
}
if (!strcmp(argv[idx], "-it")) {
iter = atoi(argv[idx+1]);
}
if (!strcmp(argv[idx], "-v") && op_mode == OP_MODE_MASTER) {
verify = true;
}
}
if (!matched)
op_mode = OP_MODE_MASTER;
return 0;
}
int main(int argc, char ** argv) {
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
SetOperationMode(argc, argv);
switch(op_mode) {
case OP_MODE_MASTER:
ret = MasterRoutine(argc, argv);
break;
case OP_MODE_WORKER:
ret = WorkerRoutine(argc, argv);
break;
}
return 0;
}
/**
* @Routine for the master:
*/
int MasterRoutine(int argc, char ** argv) {
MPI_Barrier(MPI_COMM_WORLD);
char rank_str[8], wid_str[8], iter_str[8];
snprintf(rank_str, 8, "%d", rank);
// Control PE: the master PE with rank 0
int count = 0;
int count_buf[3];
int child_buf[2];
int control_buf[4];
int compute_info[7];
PRECISION gosa_sum[pe_per_node];
bool end = false;
int rev_id = 0;
timeval t_b, t_e;
double t_p;
MPI_Request request;
MPI_Status status;
#if 0
// one-sided communication buffer create
MPI_Win win;
MPI_Win_create(control_buf, pe_node, sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);
#endif
// Initialize data info
CheckError( SetPreference(argc, argv));
config.mimax = (config.ndx0 == 1) ?
config.mx0 : (config.mx0 / config.ndx0) + 3;
config.mjmax = (config.ndy0 == 1) ?
config.my0 : (config.my0 / config.ndy0) + 3;
config.mkmax = (config.ndz0 == 1) ?
config.mz0 : (config.mz0 / config.ndz0) + 3;
mx = config.mx0 - 1; my = config.my0 - 1; mz = config.mz0 - 1;
if (mode == MODE_HELP) {
print_help();
exit(0);
}
memset(count_buf, 0, sizeof(count_buf));
memset(child_buf, 0, sizeof(child_buf));
memset(control_buf, 0, sizeof(control_buf));
memset(compute_info, 0, sizeof(compute_info));
if (rank == 0) {
cout << "++++++++" << endl << "PE_per_node" << pe_per_node << endl
<< "PE_Max: " << max_pe << endl
<< "PE_Node: " << pe_node << endl
<< "PE_iter: " << iter << endl
<< "++++++++" << endl;
}
if (rank == 0 ) {
// Control PE (MPE)
int idx;
gettimeofday(&t_b, NULL);
// control parameter
for (idx=0; idx<iter; idx++) {
count = 0;
memset(count_buf, 0, sizeof(count_buf));
memset(child_buf, 0, sizeof(child_buf));
memset(control_buf, 0, sizeof(control_buf));
memset(compute_info, 0, sizeof(compute_info));
memset(gosa_sum, 0, sizeof(gosa_sum));
wgosa = 0.0;
gosa = 0.0;
while (count < max_pe) {
if (idx == 0)
MPI_Recv(control_buf, 4, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
else {
count_buf[1] = 1;
for (int j=1; j<pe_node; j++)
MPI_Send(count_buf, 3, MPI_INT, j, 0, MPI_COMM_WORLD);
}
#if 0
MPI_Win_fence(0, win);
cout << "==============" << endl << "Show Status: " << local_buf[i] << endl;
cout << "==============" << endl << endl;
#endif
if (idx == 0) {
rev_id = status.MPI_SOURCE;
if (rev_id == -1) {
cout << "Error: Master PE Monitor Receiving Error!" << endl;
exit(0);
}
// control_buf[0] indicates if task is completed by master PE, 2 yes, 1 no
if (control_buf[0] == 1) {
// count_buf[1] indicates control PE accept a new request of child PE
count_buf[1] = 1;
// count_buf[2] indicates the WID for new child PE
count_buf[2] = count;
// send WID
MPI_Send(count_buf, 3, MPI_INT, rev_id, 0, MPI_COMM_WORLD);
}
wid = count;
count += pe_per_node;
control_buf[0] = 0;
control_buf[1] = 0;
control_buf[2] = 0;
}
if (idx > 0)
break;
}
// receive the final dataset before ending
for (int i=1; i< pe_node; i++) {
if (idx == 0) {
// receive control_buf to have final data WID
MPI_Recv(control_buf, 4, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
count_buf[1] = 2;
// send end flag to WPEs
MPI_Send(count_buf, 3, MPI_INT, i, 0, MPI_COMM_WORLD);
}
//Receive result data from WPEs
MPI_Recv(&wgosa, 1, MPI_PRECISION, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
gosa += wgosa;
}
//cout << "secs: " << sec << "/" << duration << "\xd";
cout << idx << ":" << gosa << endl;
}
} else {
MPI_Comm children_comm[18];
int wid_buf[8];
int comm_count = 0;
PRECISION tmp_gosa = 0.0;
int idx;
for (idx=0; idx<iter; idx++) {
memset(count_buf, 0, sizeof(count_buf));
memset(child_buf, 0, sizeof(child_buf));
memset(control_buf, 0, sizeof(control_buf));
memset(compute_info, 0, sizeof(compute_info));
memset(gosa_sum, 0, sizeof(gosa_sum));
tmp_gosa = 0.0;
end = false;
while (!end) {
control_buf[0] = 1;
if (idx == 0) {
// send request for WID
MPI_Send(control_buf, 4, MPI_INT, 0, 0, MPI_COMM_WORLD);
// receive WID
MPI_Recv(count_buf, 3, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
else {
MPI_Recv(count_buf, 3, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
if (count_buf[1] == 1) {
if (idx == 0) {
// set WID
wid = count_buf[2];
snprintf(wid_str, 8, "%d", count_buf[2]);
snprintf(iter_str, 8, "%d", iter);
char * c_argv[] = {const_cast<char *>("worker"), rank_str, wid_str, iter_str, NULL};
//Each of the processes in the master-job spawns a worker-job
//consisting of NUM_WORKER_PROCS processes.
int pe_spn;
if (wid+pe_per_node > max_pe)
pe_spn = max_pe - wid;
else
pe_spn = pe_per_node;
// host info
char hostname[256];
gethostname(hostname, 256);
#if 1
int offset=0;
for (int i=0;i<strlen(hostname);i++) {
if (hostname[i] == '.') {
offset = i;
break;
}
}
hostname[offset] = '\0';
#endif
MPI_Info spawn_info;
MPI_Info_create(&spawn_info);
MPI_Info_set(spawn_info, "host", hostname);
printf("pe: %d, pe_spn: %d, wid: %d\n", rank, pe_spn, wid);
MPI_Comm_spawn(argv[0], c_argv, pe_spn, spawn_info/*MPI_INFO_NULL*/,
0, MPI_COMM_SELF, &children_comm[comm_count], MPI_ERRCODES_IGNORE);
}
// send dataset and PE info
compute_info[0] = config.ndx0;
compute_info[1] = config.ndy0;
compute_info[2] = config.ndz0;
compute_info[3] = config.mx0;
compute_info[4] = config.my0;
compute_info[5] = config.mz0;
compute_info[6] = gosa;
if (idx == 0) {
// Enable children PE to begin new iteration
for (int k=0; k<pe_per_node; k++) {
if (wid+k < max_pe) {
// send dataset to children PEs
MPI_Send(compute_info, 7, MPI_INT, k, 0, children_comm[comm_count]);
}
}
wid_buf[comm_count] = wid;
for (int k=0; k<pe_per_node; k++) {
int tmpid = wid + k;
if (tmpid < max_pe) {
//Receive the message from all the corresponding workers.
MPI_Recv(&wgosa, 1, MPI_PRECISION, k, 0, children_comm[comm_count], MPI_STATUS_IGNORE);
tmp_gosa += wgosa;
}
}
comm_count++;
control_buf[1] = wid;
control_buf[2] = 1;
} else {
for (int j=0; j<comm_count; j++) {
for (int k=0; k<pe_per_node; k++) {
int tmpid = wid_buf[j] + k;
if (tmpid < max_pe) {
// send dataset to children PEs
MPI_Send(compute_info, 7, MPI_INT, k, 0, children_comm[j]);
}
}
}
for (int j=0; j<comm_count; j++) {
for (int k=0; k<pe_per_node; k++) {
int tmpid = wid_buf[j] + k;
if (tmpid < max_pe) {
//Receive the message from all the corresponding workers.
MPI_Recv(&wgosa, 1, MPI_PRECISION, k, 0, children_comm[j], MPI_STATUS_IGNORE);
tmp_gosa += wgosa;
}
}
}
          count_buf[1] = 2;
}
} else if (count_buf[1] == 2) {
// Send result dataset
MPI_Send(&tmp_gosa, 1, MPI_PRECISION, 0, 0, MPI_COMM_WORLD);
control_buf[2] = 0;
end = true;
}//elseif
printf("Process: %d...begin\n", rank);
if (idx > 0) {
MPI_Send(&tmp_gosa, 1, MPI_PRECISION, 0, 0, MPI_COMM_WORLD);
end = true;
}//endif
printf("Process: %d...\n", rank);
}//while
}
}
MPI_Barrier(MPI_COMM_WORLD);
if (rank == 0) {
gettimeofday(&t_e, NULL);
t_p = (t_e.tv_sec + t_e.tv_usec * 1e-6) - (t_b.tv_sec + t_b.tv_usec * 1e-6);
printf("Time: %f\n", t_p);
}
cout << "End of Program...... /" << rank << endl;
MPI_Finalize();
return 0;
}
/**
* @Routine for the workers
*/
int WorkerRoutine(int argc, char ** argv) {
MPI_Comm parent_comm;
int parent_size;
int task_buf[2];
int compute_info[7];
MPI_Comm_get_parent(&parent_comm);
if (parent_comm == MPI_COMM_NULL)
return -1;
//Attention!: The size of the inter-communicator obtained through the
// MPI_Comm_remote_size() will always be '1' since a number
// of NUM_WORKER_PROCS child processes are spawned by each
// of the master processes. Therefore, each group of the
// NUM_WORKER_PROCS child processes recognizes only their
// corresponding master process in the inter-communicator.
MPI_Comm_remote_size(parent_comm, &parent_size);
printf("\tchildren pe: %d, iter: %d\n", rank, iter);
int idx;
for (idx=0; idx<iter; idx++) {
// receive matrix info
MPI_Recv(compute_info, 7, MPI_INT, 0, 0, parent_comm, MPI_STATUS_IGNORE);
if (idx == 0) {
config.ndx0 = compute_info[0];
config.ndy0 = compute_info[1];
config.ndz0 = compute_info[2];
config.mx0 = compute_info[3];
config.my0 = compute_info[4];
config.mz0 = compute_info[5];
config.mimax = (config.ndx0 == 1) ?
config.mx0 : (config.mx0 / config.ndx0) + 3;
config.mjmax = (config.ndy0 == 1) ?
config.my0 : (config.my0 / config.ndy0) + 3;
config.mkmax = (config.ndz0 == 1) ?
config.mz0 : (config.mz0 / config.ndz0) + 3;
mx = config.mx0 - 1; my = config.my0 - 1; mz = config.mz0 - 1;
wid = wid + rank;
// data initialize
bmtInitMax(mx, my, mz, wid);
pa = new Matrix(4, config.mimax, config.mjmax, config.mkmax);
pb = new Matrix(3, config.mimax, config.mjmax, config.mkmax);
pc = new Matrix(3, config.mimax, config.mjmax, config.mkmax);
pp = new Matrix(config.mimax, config.mjmax, config.mkmax);
pwrk1 = new Matrix(config.mimax, config.mjmax, config.mkmax);
pwrk2 = new Matrix(config.mimax, config.mjmax, config.mkmax);
pbnd = new Matrix(config.mimax, config.mjmax, config.mkmax);
bmtInitMt(
*pa, *pb, *pc,
*pp, *pwrk1, *pwrk2,
*pbnd, mx, it,
config.mimax, config.mjmax, config.mkmax,
imax, jmax, kmax);
cudaError_t ce = bmtInitDeviceMemory(
pa, pb, pc,
pp, pwrk1, pwrk2,
pbnd, rank);
if (ce != cudaSuccess)
cerr << "Error: " << cudaGetErrorString(ce) << endl;
a = pa->GetPtr4D();
b = pb->GetPtr4D();
c = pc->GetPtr4D();
p = pp->GetPtr3D();
wrk1 = pwrk1->GetPtr3D();
wrk2 = pwrk2->GetPtr3D();
bnd = pbnd->GetPtr3D();
}
int deviceCnt = 0;
CheckCUDAError( cudaGetDeviceCount(&deviceCnt));
// CheckCUDAError( cudaSetDevice(rank % deviceCnt));
char hostname[128];
gethostname(hostname, 128);
cout << "\tPE:" << wid << " / " << parent_rank << " iter: " <<iter << " ["
<< hostname << "]: RUN: Device[" << rank%deviceCnt << "]" << endl;
/**********************************************************************
* Launch Kernel
*********************************************************************/
CheckCUDAError( bmtCudaJacobi(&wgosa, pp, imax, jmax, kmax));
#if 0
struct timeval time_b, time_e;
gettimeofday(&time_b, NULL);
gettimeofday(&time_e, NULL);
cout << "Kernel Time: " << (time_e.tv_usec - time_b.tv_usec)*1e-6 + ((double)time_e.tv_sec - (double)time_b.tv_sec) << endl;
#endif
/**********************************************************************
* Finalize
*********************************************************************/
#if 0
char send_buf[256];
snprintf(send_buf, 256, "I am rank %d, the worker of rank %d, own rank: %d",
wid, parent_rank, rank);
printf( "Master(%d): %s \n", parent_rank, send_buf);
#endif
task_buf[0] = 1;
MPI_Send(&wgosa, 1, MPI_PRECISION, 0, 0, parent_comm);
}
MPI_Finalize();
return 0;
}
|
40c51805d641fd6bb4da221517327199345737c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
const int TILE_DIM = 32;
const int BLOCK_ROWS = 8;
const int NUM_REPS = 100;
// Check errors and print GB/s
void postprocess(const float *ref, const float *res, int n, float ms)
{
bool passed = true;
for (int i = 0; i < n; i++)
if (res[i] != ref[i]) {
printf("%d %f %f\n", i, res[i], ref[i]);
printf("%25s\n", "*** FAILED ***");
passed = false;
break;
}
if (passed)
printf("%20.2f\n", 2 * n * sizeof(float) * 1e-6 * NUM_REPS / ms );
}
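// Bandwidth reported above: each of the NUM_REPS launches reads and writes n
// floats, so bytes moved = 2 * n * sizeof(float) * NUM_REPS; dividing by the
// elapsed time in ms and scaling by 1e-6 gives GB/s.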
// simple copy kernel
// Used as reference case representing best effective bandwidth.
__global__ void copy(float *odata, const float *idata)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
odata[(y+j)*width + x] = idata[(y+j)*width + x];
}
// copy kernel using shared memory
// Also used as reference case, demonstrating effect of using shared memory.
__global__ void copySharedMem(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM * TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[(threadIdx.y+j)*TILE_DIM + threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[(threadIdx.y+j)*TILE_DIM + threadIdx.x];
}
// naive transpose
// Simplest transpose; doesn't use shared memory.
// Global memory reads are coalesced but writes are not.
__global__ void transposeNaive(float *odata, const float *idata)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
odata[x*width + (y+j)] = idata[(y+j)*width + x];
}
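// Here the load idata[(y+j)*width + x] is contiguous in x and therefore
// coalesced, but the store odata[x*width + (y+j)] strides by width between
// neighbouring threads, so each warp's writes are scattered across memory
// segments; this is what the shared-memory versions below avoid.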
// coalesced transpose
// Uses shared memory to achieve coalescing in both reads and writes.
// Because the tile width equals the number of banks (32), the column reads
// from the tile cause shared memory bank conflicts.
__global__ void transposeCoalesced(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
// No bank-conflict transpose
// Same as transposeCoalesced except that each tile row is padded by one
// element (TILE_DIM+1) to avoid shared memory bank conflicts.
__global__ void transposeNoBankConflicts(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
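// With rows padded to TILE_DIM+1 = 33 floats, element [r][c] sits at linear
// offset r*33 + c, so its bank index is (r*33 + c) % 32 = (r + c) % 32, which
// differs for every r. The column read tile[threadIdx.x][threadIdx.y+j] above
// therefore touches 32 distinct banks and is conflict-free.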
int main(int argc, char **argv)
{
const int nx = 1024;
const int ny = 1024;
const int mem_size = nx*ny*sizeof(float);
dim3 dimGrid(nx/TILE_DIM, ny/TILE_DIM, 1);
dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);
int devId = 0;
if (argc > 1) devId = atoi(argv[1]);
hipDeviceProp_t prop;
checkCuda( hipGetDeviceProperties(&prop, devId));
printf("\nDevice : %s\n", prop.name);
printf("Matrix size: %d %d, Block size: %d %d, Tile size: %d %d\n",
nx, ny, TILE_DIM, BLOCK_ROWS, TILE_DIM, TILE_DIM);
printf("dimGrid: %d %d %d. dimBlock: %d %d %d\n",
dimGrid.x, dimGrid.y, dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
checkCuda( hipSetDevice(devId) );
float *h_idata = (float*)malloc(mem_size);
float *h_cdata = (float*)malloc(mem_size);
float *h_tdata = (float*)malloc(mem_size);
float *gold = (float*)malloc(mem_size);
float *d_idata, *d_cdata, *d_tdata;
checkCuda( hipMalloc(&d_idata, mem_size) );
checkCuda( hipMalloc(&d_cdata, mem_size) );
checkCuda( hipMalloc(&d_tdata, mem_size) );
// check parameters and calculate execution configuration
if (nx % TILE_DIM || ny % TILE_DIM) {
printf("nx and ny must be a multiple of TILE_DIM\n");
goto error_exit;
}
if (TILE_DIM % BLOCK_ROWS) {
printf("TILE_DIM must be a multiple of BLOCK_ROWS\n");
goto error_exit;
}
// host
for (int j = 0; j < ny; j++)
for (int i = 0; i < nx; i++)
h_idata[j*nx + i] = j*nx + i;
// correct result for error checking
for (int j = 0; j < ny; j++)
for (int i = 0; i < nx; i++)
gold[j*nx + i] = h_idata[i*nx + j];
// device
checkCuda( hipMemcpy(d_idata, h_idata, mem_size, hipMemcpyHostToDevice) );
// events for timing
hipEvent_t startEvent, stopEvent;
checkCuda( hipEventCreate(&startEvent) );
checkCuda( hipEventCreate(&stopEvent) );
float ms;
// ------------
// time kernels
// ------------
printf("%25s%25s\n", "Routine", "Bandwidth (GB/s)");
// ----
// copy
// ----
printf("%25s", "copy");
checkCuda( hipMemset(d_cdata, 0, mem_size) );
// warm up
hipLaunchKernelGGL(( copy), dim3(dimGrid), dim3(dimBlock), 0, 0, d_cdata, d_idata);
checkCuda( hipEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
hipLaunchKernelGGL(( copy), dim3(dimGrid), dim3(dimBlock), 0, 0, d_cdata, d_idata);
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( hipMemcpy(h_cdata, d_cdata, mem_size, hipMemcpyDeviceToHost) );
postprocess(h_idata, h_cdata, nx*ny, ms);
// -------------
// copySharedMem
// -------------
printf("%25s", "shared memory copy");
checkCuda( hipMemset(d_cdata, 0, mem_size) );
// warm up
hipLaunchKernelGGL(( copySharedMem), dim3(dimGrid), dim3(dimBlock), 0, 0, d_cdata, d_idata);
checkCuda( hipEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
hipLaunchKernelGGL(( copySharedMem), dim3(dimGrid), dim3(dimBlock), 0, 0, d_cdata, d_idata);
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( hipMemcpy(h_cdata, d_cdata, mem_size, hipMemcpyDeviceToHost) );
postprocess(h_idata, h_cdata, nx * ny, ms);
// --------------
// transposeNaive
// --------------
printf("%25s", "naive transpose");
checkCuda( hipMemset(d_tdata, 0, mem_size) );
// warmup
hipLaunchKernelGGL(( transposeNaive), dim3(dimGrid), dim3(dimBlock), 0, 0, d_tdata, d_idata);
checkCuda( hipEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
hipLaunchKernelGGL(( transposeNaive), dim3(dimGrid), dim3(dimBlock), 0, 0, d_tdata, d_idata);
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( hipMemcpy(h_tdata, d_tdata, mem_size, hipMemcpyDeviceToHost) );
postprocess(gold, h_tdata, nx * ny, ms);
// ------------------
// transposeCoalesced
// ------------------
printf("%25s", "coalesced transpose");
checkCuda( hipMemset(d_tdata, 0, mem_size) );
// warmup
hipLaunchKernelGGL(( transposeCoalesced), dim3(dimGrid), dim3(dimBlock), 0, 0, d_tdata, d_idata);
checkCuda( hipEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
hipLaunchKernelGGL(( transposeCoalesced), dim3(dimGrid), dim3(dimBlock), 0, 0, d_tdata, d_idata);
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( hipMemcpy(h_tdata, d_tdata, mem_size, hipMemcpyDeviceToHost) );
postprocess(gold, h_tdata, nx * ny, ms);
// ------------------------
// transposeNoBankConflicts
// ------------------------
printf("%25s", "conflict-free transpose");
checkCuda( hipMemset(d_tdata, 0, mem_size) );
// warmup
hipLaunchKernelGGL(( transposeNoBankConflicts), dim3(dimGrid), dim3(dimBlock), 0, 0, d_tdata, d_idata);
checkCuda( hipEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
hipLaunchKernelGGL(( transposeNoBankConflicts), dim3(dimGrid), dim3(dimBlock), 0, 0, d_tdata, d_idata);
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( hipMemcpy(h_tdata, d_tdata, mem_size, hipMemcpyDeviceToHost) );
postprocess(gold, h_tdata, nx * ny, ms);
error_exit:
// cleanup
checkCuda( hipEventDestroy(startEvent) );
checkCuda( hipEventDestroy(stopEvent) );
checkCuda( hipFree(d_tdata) );
checkCuda( hipFree(d_cdata) );
checkCuda( hipFree(d_idata) );
free(h_idata);
free(h_tdata);
free(h_cdata);
free(gold);
} | 40c51805d641fd6bb4da221517327199345737c7.cu | #include <stdio.h>
#include <assert.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
const int TILE_DIM = 32;
const int BLOCK_ROWS = 8;
const int NUM_REPS = 100;
// Check errors and print GB/s
void postprocess(const float *ref, const float *res, int n, float ms)
{
bool passed = true;
for (int i = 0; i < n; i++)
if (res[i] != ref[i]) {
printf("%d %f %f\n", i, res[i], ref[i]);
printf("%25s\n", "*** FAILED ***");
passed = false;
break;
}
if (passed)
printf("%20.2f\n", 2 * n * sizeof(float) * 1e-6 * NUM_REPS / ms );
}
// simple copy kernel
// Used as reference case representing best effective bandwidth.
__global__ void copy(float *odata, const float *idata)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
odata[(y+j)*width + x] = idata[(y+j)*width + x];
}
// copy kernel using shared memory
// Also used as reference case, demonstrating effect of using shared memory.
__global__ void copySharedMem(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM * TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[(threadIdx.y+j)*TILE_DIM + threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[(threadIdx.y+j)*TILE_DIM + threadIdx.x];
}
// naive transpose
// Simplest transpose; doesn't use shared memory.
// Global memory reads are coalesced but writes are not.
__global__ void transposeNaive(float *odata, const float *idata)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
odata[x*width + (y+j)] = idata[(y+j)*width + x];
}
// coalesced transpose
// Uses shared memory to achieve coalescing in both reads and writes.
// Because the tile width equals the number of banks (32), the column reads
// from the tile cause shared memory bank conflicts.
__global__ void transposeCoalesced(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
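// Note the block-offset swap before the write phase: x is rebuilt from
// blockIdx.y and y from blockIdx.x, so each block stores the transposed tile
// at the mirrored block position while threads still advance along x, keeping
// the global write coalesced; the per-element transposition happens through
// the column read tile[threadIdx.x][threadIdx.y + j].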
// No bank-conflict transpose
// Same as transposeCoalesced except that each tile row is padded by one
// element (TILE_DIM+1) to avoid shared memory bank conflicts.
__global__ void transposeNoBankConflicts(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
int main(int argc, char **argv)
{
const int nx = 1024;
const int ny = 1024;
const int mem_size = nx*ny*sizeof(float);
dim3 dimGrid(nx/TILE_DIM, ny/TILE_DIM, 1);
dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);
int devId = 0;
if (argc > 1) devId = atoi(argv[1]);
cudaDeviceProp prop;
checkCuda( cudaGetDeviceProperties(&prop, devId));
printf("\nDevice : %s\n", prop.name);
printf("Matrix size: %d %d, Block size: %d %d, Tile size: %d %d\n",
nx, ny, TILE_DIM, BLOCK_ROWS, TILE_DIM, TILE_DIM);
printf("dimGrid: %d %d %d. dimBlock: %d %d %d\n",
dimGrid.x, dimGrid.y, dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
checkCuda( cudaSetDevice(devId) );
float *h_idata = (float*)malloc(mem_size);
float *h_cdata = (float*)malloc(mem_size);
float *h_tdata = (float*)malloc(mem_size);
float *gold = (float*)malloc(mem_size);
float *d_idata, *d_cdata, *d_tdata;
checkCuda( cudaMalloc(&d_idata, mem_size) );
checkCuda( cudaMalloc(&d_cdata, mem_size) );
checkCuda( cudaMalloc(&d_tdata, mem_size) );
// check parameters and calculate execution configuration
if (nx % TILE_DIM || ny % TILE_DIM) {
printf("nx and ny must be a multiple of TILE_DIM\n");
goto error_exit;
}
if (TILE_DIM % BLOCK_ROWS) {
printf("TILE_DIM must be a multiple of BLOCK_ROWS\n");
goto error_exit;
}
// host
for (int j = 0; j < ny; j++)
for (int i = 0; i < nx; i++)
h_idata[j*nx + i] = j*nx + i;
// correct result for error checking
for (int j = 0; j < ny; j++)
for (int i = 0; i < nx; i++)
gold[j*nx + i] = h_idata[i*nx + j];
// device
checkCuda( cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice) );
// events for timing
cudaEvent_t startEvent, stopEvent;
checkCuda( cudaEventCreate(&startEvent) );
checkCuda( cudaEventCreate(&stopEvent) );
float ms;
// ------------
// time kernels
// ------------
printf("%25s%25s\n", "Routine", "Bandwidth (GB/s)");
// ----
// copy
// ----
printf("%25s", "copy");
checkCuda( cudaMemset(d_cdata, 0, mem_size) );
// warm up
copy<<<dimGrid, dimBlock>>>(d_cdata, d_idata);
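  // The single untimed launch above is a warm-up so the timed loop measures
  // steady-state throughput rather than first-launch overhead; the same
  // pattern precedes every timed kernel in this benchmark.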
checkCuda( cudaEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
copy<<<dimGrid, dimBlock>>>(d_cdata, d_idata);
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( cudaMemcpy(h_cdata, d_cdata, mem_size, cudaMemcpyDeviceToHost) );
postprocess(h_idata, h_cdata, nx*ny, ms);
// -------------
// copySharedMem
// -------------
printf("%25s", "shared memory copy");
checkCuda( cudaMemset(d_cdata, 0, mem_size) );
// warm up
copySharedMem<<<dimGrid, dimBlock>>>(d_cdata, d_idata);
checkCuda( cudaEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
copySharedMem<<<dimGrid, dimBlock>>>(d_cdata, d_idata);
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( cudaMemcpy(h_cdata, d_cdata, mem_size, cudaMemcpyDeviceToHost) );
postprocess(h_idata, h_cdata, nx * ny, ms);
// --------------
// transposeNaive
// --------------
printf("%25s", "naive transpose");
checkCuda( cudaMemset(d_tdata, 0, mem_size) );
// warmup
transposeNaive<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
checkCuda( cudaEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
transposeNaive<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost) );
postprocess(gold, h_tdata, nx * ny, ms);
// ------------------
// transposeCoalesced
// ------------------
printf("%25s", "coalesced transpose");
checkCuda( cudaMemset(d_tdata, 0, mem_size) );
// warmup
transposeCoalesced<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
checkCuda( cudaEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
transposeCoalesced<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost) );
postprocess(gold, h_tdata, nx * ny, ms);
// ------------------------
// transposeNoBankConflicts
// ------------------------
printf("%25s", "conflict-free transpose");
checkCuda( cudaMemset(d_tdata, 0, mem_size) );
// warmup
transposeNoBankConflicts<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
checkCuda( cudaEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
transposeNoBankConflicts<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost) );
postprocess(gold, h_tdata, nx * ny, ms);
error_exit:
// cleanup
checkCuda( cudaEventDestroy(startEvent) );
checkCuda( cudaEventDestroy(stopEvent) );
checkCuda( cudaFree(d_tdata) );
checkCuda( cudaFree(d_cdata) );
checkCuda( cudaFree(d_idata) );
free(h_idata);
free(h_tdata);
free(h_cdata);
free(gold);
} |
8c5469393bb3f8cbe91c70cc0a3f9b61344344dc.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2019 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "simulator_testfixture.h"
#include <rocblas.h>
#include <custatevec.h>
#include <type_traits>
#include "gtest/gtest.h"
#include "../lib/simulator_custatevec.h"
namespace qsim {
template <class T>
class SimulatorCuStateVecTest : public testing::Test {};
using fp_impl = ::testing::Types<float, double>;
TYPED_TEST_SUITE(SimulatorCuStateVecTest, fp_impl);
template <typename fp_type>
struct Factory {
using Simulator = qsim::SimulatorCuStateVec<fp_type>;
using StateSpace = typename Simulator::StateSpace;
Factory() {
ErrorCheck(hipblasCreate(&cublas_handle));
ErrorCheck(custatevecCreate(&custatevec_handle));
}
~Factory() {
ErrorCheck(hipblasDestroy(cublas_handle));
ErrorCheck(custatevecDestroy(custatevec_handle));
}
StateSpace CreateStateSpace() const {
return StateSpace(cublas_handle, custatevec_handle);
}
Simulator CreateSimulator() const {
return Simulator(custatevec_handle);
}
hipblasHandle_t cublas_handle;
custatevecHandle_t custatevec_handle;
};
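// The Factory creates the BLAS and cuStateVec handles once per test and hands
// them to both the StateSpace and the Simulator; the typed test suite below
// instantiates every test for float and for double via fp_impl.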
TYPED_TEST(SimulatorCuStateVecTest, ApplyGate1) {
TestApplyGate1(qsim::Factory<TypeParam>());
}
TYPED_TEST(SimulatorCuStateVecTest, ApplyGate2) {
TestApplyGate2(qsim::Factory<TypeParam>());
}
TYPED_TEST(SimulatorCuStateVecTest, ApplyGate3) {
TestApplyGate3(qsim::Factory<TypeParam>());
}
TYPED_TEST(SimulatorCuStateVecTest, ApplyGate5) {
TestApplyGate5(qsim::Factory<TypeParam>());
}
TYPED_TEST(SimulatorCuStateVecTest, CircuitWithControlledGates) {
TestCircuitWithControlledGates(qsim::Factory<TypeParam>());
}
TYPED_TEST(SimulatorCuStateVecTest, CircuitWithControlledGatesDagger) {
TestCircuitWithControlledGatesDagger(qsim::Factory<TypeParam>());
}
TYPED_TEST(SimulatorCuStateVecTest, MultiQubitGates) {
TestMultiQubitGates(qsim::Factory<TypeParam>());
}
TYPED_TEST(SimulatorCuStateVecTest, ControlledGates) {
bool high_precision = std::is_same<TypeParam, double>::value;
TestControlledGates(qsim::Factory<TypeParam>(), high_precision);
}
TYPED_TEST(SimulatorCuStateVecTest, ExpectationValue1) {
TestExpectationValue1(qsim::Factory<TypeParam>());
}
TYPED_TEST(SimulatorCuStateVecTest, ExpectationValue2) {
TestExpectationValue2(qsim::Factory<TypeParam>());
}
} // namespace qsim
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
| 8c5469393bb3f8cbe91c70cc0a3f9b61344344dc.cu | // Copyright 2019 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "simulator_testfixture.h"
#include <cublas_v2.h>
#include <custatevec.h>
#include <type_traits>
#include "gtest/gtest.h"
#include "../lib/simulator_custatevec.h"
namespace qsim {
template <class T>
class SimulatorCuStateVecTest : public testing::Test {};
using fp_impl = ::testing::Types<float, double>;
TYPED_TEST_SUITE(SimulatorCuStateVecTest, fp_impl);
template <typename fp_type>
struct Factory {
using Simulator = qsim::SimulatorCuStateVec<fp_type>;
using StateSpace = typename Simulator::StateSpace;
Factory() {
ErrorCheck(cublasCreate(&cublas_handle));
ErrorCheck(custatevecCreate(&custatevec_handle));
}
~Factory() {
ErrorCheck(cublasDestroy(cublas_handle));
ErrorCheck(custatevecDestroy(custatevec_handle));
}
StateSpace CreateStateSpace() const {
return StateSpace(cublas_handle, custatevec_handle);
}
Simulator CreateSimulator() const {
return Simulator(custatevec_handle);
}
cublasHandle_t cublas_handle;
custatevecHandle_t custatevec_handle;
};
TYPED_TEST(SimulatorCuStateVecTest, ApplyGate1) {
TestApplyGate1(qsim::Factory<TypeParam>());
}
TYPED_TEST(SimulatorCuStateVecTest, ApplyGate2) {
TestApplyGate2(qsim::Factory<TypeParam>());
}
TYPED_TEST(SimulatorCuStateVecTest, ApplyGate3) {
TestApplyGate3(qsim::Factory<TypeParam>());
}
TYPED_TEST(SimulatorCuStateVecTest, ApplyGate5) {
TestApplyGate5(qsim::Factory<TypeParam>());
}
TYPED_TEST(SimulatorCuStateVecTest, CircuitWithControlledGates) {
TestCircuitWithControlledGates(qsim::Factory<TypeParam>());
}
TYPED_TEST(SimulatorCuStateVecTest, CircuitWithControlledGatesDagger) {
TestCircuitWithControlledGatesDagger(qsim::Factory<TypeParam>());
}
TYPED_TEST(SimulatorCuStateVecTest, MultiQubitGates) {
TestMultiQubitGates(qsim::Factory<TypeParam>());
}
TYPED_TEST(SimulatorCuStateVecTest, ControlledGates) {
bool high_precision = std::is_same<TypeParam, double>::value;
TestControlledGates(qsim::Factory<TypeParam>(), high_precision);
}
TYPED_TEST(SimulatorCuStateVecTest, ExpectationValue1) {
TestExpectationValue1(qsim::Factory<TypeParam>());
}
TYPED_TEST(SimulatorCuStateVecTest, ExpectationValue2) {
TestExpectationValue2(qsim::Factory<TypeParam>());
}
} // namespace qsim
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
|
983b52d9f51dce1fd7809d607397151bf966c0bd.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Licensed under the MIT License.
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <typeinfo>
#include <hip/hip_fp16.h>
#include <hiprand/hiprand_kernel.h>
#include "inc/Core/Common/cuda/params.h"
#include "inc/Core/Common/cuda/TPtree.hxx"
/*****************************************************************************************
* Count the number of points assigned to each leaf
*****************************************************************************************/
__global__ void count_leaf_sizes(LeafNode* leafs, int* node_ids, int N, int internal_nodes) {
int leaf_id;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i < N; i += blockDim.x*gridDim.x) {
leaf_id = node_ids[i] - internal_nodes;
atomicAdd(&leafs[leaf_id].size, 1);
}
}
/*****************************************************************************************
* Assign each point to a leaf node (based on its node_id when creating the tptree). Also
* computes the size and offset of each leaf node for easy permutation.
*****************************************************************************************/
__global__ void assign_leaf_points(LeafNode* leafs, int* leaf_points, int* node_ids, int N, int internal_nodes) {
int leaf_id;
int idx;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i < N; i += blockDim.x*gridDim.x) {
leaf_id = node_ids[i] - internal_nodes;
idx = atomicAdd(&leafs[leaf_id].size, 1);
leaf_points[idx + leafs[leaf_id].offset] = i;
}
}
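// assign_leaf_points reuses leafs[].size as a running write cursor via
// atomicAdd, so it presumably expects the caller to have zeroed the sizes
// again and filled leafs[].offset (e.g. with a prefix sum over the counts from
// count_leaf_sizes) before this kernel is launched.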
__global__ void assign_leaf_points_in_batch(LeafNode* leafs, int* leaf_points, int* node_ids, int N, int internal_nodes, int min_id, int max_id) {
int leaf_id;
int idx;
for (int i = min_id + blockIdx.x*blockDim.x + threadIdx.x; i < max_id; i += blockDim.x*gridDim.x) {
leaf_id = node_ids[i] - internal_nodes;
idx = atomicAdd(&leafs[leaf_id].size, 1);
leaf_points[idx + leafs[leaf_id].offset] = i;
}
}
__global__ void assign_leaf_points_out_batch(LeafNode* leafs, int* leaf_points, int* node_ids, int N, int internal_nodes, int min_id, int max_id) {
int leaf_id;
int idx;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i < min_id; i += blockDim.x*gridDim.x) {
leaf_id = node_ids[i] - internal_nodes;
idx = atomicAdd(&leafs[leaf_id].size, 1);
leaf_points[idx + leafs[leaf_id].offset] = i;
}
for (int i = max_id + blockIdx.x*blockDim.x + threadIdx.x; i < N; i += blockDim.x*gridDim.x) {
leaf_id = node_ids[i] - internal_nodes;
idx = atomicAdd(&leafs[leaf_id].size, 1);
leaf_points[idx + leafs[leaf_id].offset] = i;
}
}
//#define BAL 2 // Balance factor - will only rebalance nodes that are at least 2x larger than their sibling
// Computes the fraction of points that need to be moved from each unbalanced node on the level
__global__ void check_for_imbalance(int* node_ids, int* node_sizes, int nodes_on_level, int node_start, float* frac_to_move, int bal_factor) {
int neighborId;
for(int i=node_start + blockIdx.x*blockDim.x + threadIdx.x; i<node_start+nodes_on_level; i+=blockDim.x*gridDim.x) {
frac_to_move[i] = 0.0;
neighborId = (i-1) + 2*(i&1); // neighbor is either left or right of current
if(node_sizes[i] > bal_factor*node_sizes[neighborId]) {
frac_to_move[i] = ((float)node_sizes[i] - (((float)(node_sizes[i]+node_sizes[neighborId]))/2.0)) / (float)node_sizes[i];
}
}
}
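// The fraction computed above is (size - (size + neighbor)/2) / size, i.e. the
// share of this node's points that must migrate so that both siblings end up
// with the average of the two sizes; it stays zero unless the node exceeds
// bal_factor times its sibling's size.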
// Initialize random number generator for each thread
__global__ void initialize_rands(hiprandState_t* states, int iter) {
int id = threadIdx.x + blockIdx.x*blockDim.x;
hiprand_init(1234, id, iter, &states[id]);
}
// Randomly move points to sibling nodes based on the fraction that need to be moved out of unbalanced nodes
__global__ void rebalance_nodes(int* node_ids, int N, float* frac_to_move, hiprandState_t* states) {
int neighborId;
int threadId = blockIdx.x*blockDim.x+threadIdx.x;
for(int i=threadId; i<N; i+=blockDim.x*gridDim.x) {
if((frac_to_move[node_ids[i]] > 0.0) && (hiprand_uniform(&states[threadId]) < frac_to_move[node_ids[i]])) {
neighborId = (node_ids[i]-1) + 2*(node_ids[i]&1); // Compute idx of left or right neighbor
node_ids[i] = neighborId;
}
}
}
| 983b52d9f51dce1fd7809d607397151bf966c0bd.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Licensed under the MIT License.
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <typeinfo>
#include <cuda_fp16.h>
#include <curand_kernel.h>
#include "inc/Core/Common/cuda/params.h"
#include "inc/Core/Common/cuda/TPtree.hxx"
/*****************************************************************************************
* Count the number of points assigned to each leaf
*****************************************************************************************/
__global__ void count_leaf_sizes(LeafNode* leafs, int* node_ids, int N, int internal_nodes) {
int leaf_id;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i < N; i += blockDim.x*gridDim.x) {
leaf_id = node_ids[i] - internal_nodes;
atomicAdd(&leafs[leaf_id].size, 1);
}
}
/*****************************************************************************************
* Assign each point to a leaf node (based on its node_id when creating the tptree). Also
* computes the size and offset of each leaf node for easy permutation.
*****************************************************************************************/
__global__ void assign_leaf_points(LeafNode* leafs, int* leaf_points, int* node_ids, int N, int internal_nodes) {
int leaf_id;
int idx;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i < N; i += blockDim.x*gridDim.x) {
leaf_id = node_ids[i] - internal_nodes;
idx = atomicAdd(&leafs[leaf_id].size, 1);
leaf_points[idx + leafs[leaf_id].offset] = i;
}
}
__global__ void assign_leaf_points_in_batch(LeafNode* leafs, int* leaf_points, int* node_ids, int N, int internal_nodes, int min_id, int max_id) {
int leaf_id;
int idx;
for (int i = min_id + blockIdx.x*blockDim.x + threadIdx.x; i < max_id; i += blockDim.x*gridDim.x) {
leaf_id = node_ids[i] - internal_nodes;
idx = atomicAdd(&leafs[leaf_id].size, 1);
leaf_points[idx + leafs[leaf_id].offset] = i;
}
}
__global__ void assign_leaf_points_out_batch(LeafNode* leafs, int* leaf_points, int* node_ids, int N, int internal_nodes, int min_id, int max_id) {
int leaf_id;
int idx;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i < min_id; i += blockDim.x*gridDim.x) {
leaf_id = node_ids[i] - internal_nodes;
idx = atomicAdd(&leafs[leaf_id].size, 1);
leaf_points[idx + leafs[leaf_id].offset] = i;
}
for (int i = max_id + blockIdx.x*blockDim.x + threadIdx.x; i < N; i += blockDim.x*gridDim.x) {
leaf_id = node_ids[i] - internal_nodes;
idx = atomicAdd(&leafs[leaf_id].size, 1);
leaf_points[idx + leafs[leaf_id].offset] = i;
}
}
//#define BAL 2 // Balance factor - will only rebalance nodes that are at least 2x larger than their sibling
// Computes the fraction of points that need to be moved from each unbalanced node on the level
__global__ void check_for_imbalance(int* node_ids, int* node_sizes, int nodes_on_level, int node_start, float* frac_to_move, int bal_factor) {
int neighborId;
for(int i=node_start + blockIdx.x*blockDim.x + threadIdx.x; i<node_start+nodes_on_level; i+=blockDim.x*gridDim.x) {
frac_to_move[i] = 0.0;
neighborId = (i-1) + 2*(i&1); // neighbor is either left or right of current
if(node_sizes[i] > bal_factor*node_sizes[neighborId]) {
frac_to_move[i] = ((float)node_sizes[i] - (((float)(node_sizes[i]+node_sizes[neighborId]))/2.0)) / (float)node_sizes[i];
}
}
}
// Initialize random number generator for each thread
__global__ void initialize_rands(curandState* states, int iter) {
int id = threadIdx.x + blockIdx.x*blockDim.x;
curand_init(1234, id, iter, &states[id]);
}
// Randomly move points to sibling nodes based on the fraction that need to be moved out of unbalanced nodes
__global__ void rebalance_nodes(int* node_ids, int N, float* frac_to_move, curandState* states) {
int neighborId;
int threadId = blockIdx.x*blockDim.x+threadIdx.x;
for(int i=threadId; i<N; i+=blockDim.x*gridDim.x) {
if((frac_to_move[node_ids[i]] > 0.0) && (curand_uniform(&states[threadId]) < frac_to_move[node_ids[i]])) {
neighborId = (node_ids[i]-1) + 2*(node_ids[i]&1); // Compute idx of left or right neighbor
node_ids[i] = neighborId;
}
}
}
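// Each point of an oversized node moves to the sibling independently with
// probability frac_to_move[node], so the expected number of migrated points
// matches the surplus computed in check_for_imbalance; the per-thread
// curandState prepared by initialize_rands supplies the uniform draws.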
|
cb81512e828f019a1d7f93d74e8991cae71963dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ---------------------------------------------------------------------------
// File: mnist_mlp.cu
// MLP BNN inference source file for MNIST.
// ---------------------------------------------------------------------------
// See our arXiv paper for detail: https://arxiv.org/abs/2006.16578
// Ang Li, Scientist, Pacific Northwest National Laboratory(PNNL), U.S.
// Homepage: http://www.angliphd.com
// GitHub repo: http://www.github.com/pnnl/TCBNN
// PNNL-IPID: 31925-E, ECCN: EAR99, IR: PNNL-SA-152850
// BSD License.
// Richland, 99352, WA, USA. June-30-2020.
// ---------------------------------------------------------------------------
#include <stdio.h>
#include <assert.h>
#include <iostream>
#include <string>
#include <hip/hip_cooperative_groups.h>
#include <iostream>
#include <fstream>
#include <vector>
#include "utility.h"
#include "param.h"
#include "kernel_hip.cuh"
#include "data.h"
using namespace cooperative_groups;
using namespace std;
#ifdef NEWFMT
__global__ void mnist_mlp(In128LayerParam* bin, Fc128LayerParam* fc1, Fc128LayerParam* fc2,
Fc128LayerParam* fc3, Out128LayerParam* bout)
{
//SET_KERNEL_TIMER;
grid_group grid = this_grid();
//========= Input ============
In128LayerFMT(bin);
grid.sync();
//TICK_KERNEL_TIMER(bin);
//========== FC1 ============
Fc128LayerFMT(fc1);
grid.sync();
//TICK_KERNEL_TIMER(fc1);
//========== FC2 ============
Fc128LayerFMT(fc2);
grid.sync();
//TICK_KERNEL_TIMER(fc2);
////========== FC3 ============
Fc128LayerFMT(fc3);
grid.sync();
//TICK_KERNEL_TIMER(fc3);
//========== Output ===========
Out128LayerFMT(bout);
//grid.sync();
//TICK_KERNEL_TIMER(bout);
}
#else
__global__ void mnist_mlp(In128LayerParam* bin, Fc128LayerParam* fc1, Fc128LayerParam* fc2,
Fc128LayerParam* fc3, Out128LayerParam* bout)
{
grid_group grid = this_grid();
//========= Input ============
In128Layer(bin);
grid.sync();
//========== FC1 ============
Fc128Layer(fc1);
grid.sync();
//========== FC2 ============
Fc128Layer(fc2);
grid.sync();
////========== FC3 ============
Fc128Layer(fc3);
grid.sync();
////========== Output ===========
Out128Layer(bout);
}
#endif
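// Both variants above fuse the whole 784-1024-1024-1024-10 binary MLP into a
// single kernel: the cooperative-groups grid.sync() acts as a device-wide
// barrier between layers, so each layer's output is globally visible before
// the next layer starts, without returning control to the host.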
int main()
{
//=============== Configuration =================
int dev = 0;
hipSetDevice(dev);
const unsigned batch = 32768;
const unsigned output_size = 10;
const unsigned n_hidden = 1024;
const unsigned image_height = 28;
const unsigned image_width = 28;
const unsigned image_size = image_height*image_width;
//=============== Get Input and Label =================
string mnist_dir = "/home/lian599/data/mnist/t10k-images-idx3-ubyte";
float* images = NULL;
SAFE_ALOC_HOST(images, image_height*image_width*batch*sizeof(float));
string mnist_label = "/home/lian599/data/mnist/t10k-labels-idx1-ubyte";
unsigned* image_labels = NULL;
SAFE_ALOC_HOST(image_labels, batch*sizeof(unsigned));
read_MNIST_normalized(mnist_dir, mnist_label, images, image_labels, batch);
//================ Get Weight =================
FILE* config_file = fopen("./mlp_mnist.csv","r");
//================ Set Network =================
//Input Layer
In128LayerParam* bin = new In128LayerParam("Fin", batch, image_size);
In128LayerParam* bin_gpu = bin->initialize(images);
//Fc1 Layer
Fc128LayerParam* bfc1 = new Fc128LayerParam("Fc1", batch, image_size, n_hidden);
Fc128LayerParam* bfc1_gpu = bfc1->initialize(config_file, bin->get_output_gpu());
//Fc2 Layer
Fc128LayerParam* bfc2 = new Fc128LayerParam("Fc2", batch, n_hidden, n_hidden);
Fc128LayerParam* bfc2_gpu = bfc2->initialize(config_file, bfc1->get_output_gpu());
//Fc3 Layer
Fc128LayerParam* bfc3 = new Fc128LayerParam("Fc3", batch, n_hidden, n_hidden);
Fc128LayerParam* bfc3_gpu = bfc3->initialize(config_file, bfc2->get_output_gpu());
//Out Layer
Out128LayerParam* bout = new Out128LayerParam("Fout", batch, n_hidden, output_size);
Out128LayerParam* bout_gpu = bout->initialize(config_file, bfc3->get_output_gpu());
//Out128LayerParam* bout = new Out128LayerParam("Fout", batch, n_hidden, output_size);
//Out128LayerParam* bout_gpu = bout->initialize(config_file, bfc1->get_output_gpu());
//================ Setup Kernel =================
int numThreads = 1024;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
int numBlocksPerSm;
int shared_memory = 64*sizeof(int)*32;
hipFuncSetAttribute(mnist_mlp, hipFuncAttributeMaxDynamicSharedMemorySize, shared_memory);
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, mnist_mlp, numThreads, shared_memory);
void* args[] = {&bin_gpu, &bfc1_gpu, &bfc2_gpu, &bfc3_gpu, &bout_gpu};
START_TIMER;
hipLaunchCooperativeKernel((void*)mnist_mlp, numBlocksPerSm*deviceProp.multiProcessorCount,
numThreads, args, shared_memory);
//mnist_mlp<<<numBlocksPerSm*deviceProp.multiProcessorCount, numThreads>>>(bin_gpu,
//bfc1_gpu, bfc2_gpu, bfc3_gpu, bout_gpu);
STOP_TIMER;
CUDA_CHECK_KERNEL();
//================ Output =================
float* output = bout->download_output();
//validate_prediction(output, image_labels, output_size, batch);
/*
for (int i=0; i<100; i++)
{
printf("%.3f ", output[i]);
if ((i+1)%10==0) printf("\n");
}
printf("\n");
float* out = bfc1->download_full_output();
for (int i=0; i<256; i++)
{
printf("%.0f ", out[i]);
if ((i+1)%16==0) printf("\n");
}
printf("\n");
*/
/*
printf("\n====\n");
float* out = bfc1->download_full_output();
for (int i=0; i<100; i++)
{
printf("%.0f ", out[i]);
if ((i+1)%10==0) printf("\n");
}
printf("\n");
printf("\n=OO===\n");
unsigned* out1 = bfc1->download_output();
for (int i=0; i<(4*(bfc1->output_bit_size())); i++)
{
printf("%x ", out1[i]);
if ((i+1)%10==0) printf("\n");
}
printf("\n");
*/
/*
uin32* dump_weight = NULL;
SAFE_ALOC_HOST(dump_weight, bout->weight_bit_bytes())
CUDA_SAFE_CALL( hipMemcpy(dump_weight, bout->weight_gpu, bout->weight_bit_bytes(),
hipMemcpyDeviceToHost));
for (int i=0; i<32; i++)
{
printf("%x ", dump_weight[i]);
if ((i+1)%16==0) printf("\n");
}
*/
//================ Release =================
delete bin;
delete bfc1;
delete bfc2;
delete bfc3;
delete bout;
SAFE_FREE_HOST(image_labels);
SAFE_FREE_HOST(images);
return 0;
}
| cb81512e828f019a1d7f93d74e8991cae71963dc.cu | // ---------------------------------------------------------------------------
// File: mnist_mlp.cu
// MLP BNN inference source file for MNIST.
// ---------------------------------------------------------------------------
// See our arXiv paper for detail: https://arxiv.org/abs/2006.16578
// Ang Li, Scientist, Pacific Northwest National Laboratory(PNNL), U.S.
// Homepage: http://www.angliphd.com
// GitHub repo: http://www.github.com/pnnl/TCBNN
// PNNL-IPID: 31925-E, ECCN: EAR99, IR: PNNL-SA-152850
// BSD License.
// Richland, 99352, WA, USA. June-30-2020.
// ---------------------------------------------------------------------------
#include <stdio.h>
#include <assert.h>
#include <iostream>
#include <string>
#include <cooperative_groups.h>
#include <iostream>
#include <fstream>
#include <vector>
#include "utility.h"
#include "param.h"
#include "kernel.cuh"
#include "data.h"
using namespace cooperative_groups;
using namespace std;
#ifdef NEWFMT
__global__ void mnist_mlp(In128LayerParam* bin, Fc128LayerParam* fc1, Fc128LayerParam* fc2,
Fc128LayerParam* fc3, Out128LayerParam* bout)
{
//SET_KERNEL_TIMER;
grid_group grid = this_grid();
//========= Input ============
In128LayerFMT(bin);
grid.sync();
//TICK_KERNEL_TIMER(bin);
//========== FC1 ============
Fc128LayerFMT(fc1);
grid.sync();
//TICK_KERNEL_TIMER(fc1);
//========== FC2 ============
Fc128LayerFMT(fc2);
grid.sync();
//TICK_KERNEL_TIMER(fc2);
////========== FC3 ============
Fc128LayerFMT(fc3);
grid.sync();
//TICK_KERNEL_TIMER(fc3);
//========== Output ===========
Out128LayerFMT(bout);
//grid.sync();
//TICK_KERNEL_TIMER(bout);
}
#else
__global__ void mnist_mlp(In128LayerParam* bin, Fc128LayerParam* fc1, Fc128LayerParam* fc2,
Fc128LayerParam* fc3, Out128LayerParam* bout)
{
grid_group grid = this_grid();
//========= Input ============
In128Layer(bin);
grid.sync();
//========== FC1 ============
Fc128Layer(fc1);
grid.sync();
//========== FC2 ============
Fc128Layer(fc2);
grid.sync();
////========== FC3 ============
Fc128Layer(fc3);
grid.sync();
////========== Output ===========
Out128Layer(bout);
}
#endif
int main()
{
//=============== Configuration =================
int dev = 0;
cudaSetDevice(dev);
const unsigned batch = 32768;
const unsigned output_size = 10;
const unsigned n_hidden = 1024;
const unsigned image_height = 28;
const unsigned image_width = 28;
const unsigned image_size = image_height*image_width;
//=============== Get Input and Label =================
string mnist_dir = "/home/lian599/data/mnist/t10k-images-idx3-ubyte";
float* images = NULL;
SAFE_ALOC_HOST(images, image_height*image_width*batch*sizeof(float));
string mnist_label = "/home/lian599/data/mnist/t10k-labels-idx1-ubyte";
unsigned* image_labels = NULL;
SAFE_ALOC_HOST(image_labels, batch*sizeof(unsigned));
read_MNIST_normalized(mnist_dir, mnist_label, images, image_labels, batch);
//================ Get Weight =================
FILE* config_file = fopen("./mlp_mnist.csv","r");
//================ Set Network =================
//Input Layer
In128LayerParam* bin = new In128LayerParam("Fin", batch, image_size);
In128LayerParam* bin_gpu = bin->initialize(images);
//Fc1 Layer
Fc128LayerParam* bfc1 = new Fc128LayerParam("Fc1", batch, image_size, n_hidden);
Fc128LayerParam* bfc1_gpu = bfc1->initialize(config_file, bin->get_output_gpu());
//Fc2 Layer
Fc128LayerParam* bfc2 = new Fc128LayerParam("Fc2", batch, n_hidden, n_hidden);
Fc128LayerParam* bfc2_gpu = bfc2->initialize(config_file, bfc1->get_output_gpu());
//Fc3 Layer
Fc128LayerParam* bfc3 = new Fc128LayerParam("Fc3", batch, n_hidden, n_hidden);
Fc128LayerParam* bfc3_gpu = bfc3->initialize(config_file, bfc2->get_output_gpu());
//Out Layer
Out128LayerParam* bout = new Out128LayerParam("Fout", batch, n_hidden, output_size);
Out128LayerParam* bout_gpu = bout->initialize(config_file, bfc3->get_output_gpu());
//Out128LayerParam* bout = new Out128LayerParam("Fout", batch, n_hidden, output_size);
//Out128LayerParam* bout_gpu = bout->initialize(config_file, bfc1->get_output_gpu());
//================ Setup Kernel =================
int numThreads = 1024;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
int numBlocksPerSm;
int shared_memory = 64*sizeof(int)*32;
cudaFuncSetAttribute(mnist_mlp, cudaFuncAttributeMaxDynamicSharedMemorySize, shared_memory);
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, mnist_mlp, numThreads, shared_memory);
void* args[] = {&bin_gpu, &bfc1_gpu, &bfc2_gpu, &bfc3_gpu, &bout_gpu};
START_TIMER;
cudaLaunchCooperativeKernel((void*)mnist_mlp, numBlocksPerSm*deviceProp.multiProcessorCount,
numThreads, args, shared_memory);
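    // A cooperative launch requires the entire grid to be resident on the
    // device at once (otherwise grid.sync() cannot make progress), which is
    // why the grid size comes from cudaOccupancyMaxActiveBlocksPerMultiprocessor
    // times the SM count rather than from the batch size.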
//mnist_mlp<<<numBlocksPerSm*deviceProp.multiProcessorCount, numThreads>>>(bin_gpu,
//bfc1_gpu, bfc2_gpu, bfc3_gpu, bout_gpu);
STOP_TIMER;
CUDA_CHECK_KERNEL();
//================ Output =================
float* output = bout->download_output();
//validate_prediction(output, image_labels, output_size, batch);
/*
for (int i=0; i<100; i++)
{
printf("%.3f ", output[i]);
if ((i+1)%10==0) printf("\n");
}
printf("\n");
float* out = bfc1->download_full_output();
for (int i=0; i<256; i++)
{
printf("%.0f ", out[i]);
if ((i+1)%16==0) printf("\n");
}
printf("\n");
*/
/*
printf("\n====\n");
float* out = bfc1->download_full_output();
for (int i=0; i<100; i++)
{
printf("%.0f ", out[i]);
if ((i+1)%10==0) printf("\n");
}
printf("\n");
printf("\n=OO===\n");
unsigned* out1 = bfc1->download_output();
for (int i=0; i<(4*(bfc1->output_bit_size())); i++)
{
printf("%x ", out1[i]);
if ((i+1)%10==0) printf("\n");
}
printf("\n");
*/
/*
uin32* dump_weight = NULL;
SAFE_ALOC_HOST(dump_weight, bout->weight_bit_bytes())
CUDA_SAFE_CALL( cudaMemcpy(dump_weight, bout->weight_gpu, bout->weight_bit_bytes(),
cudaMemcpyDeviceToHost));
for (int i=0; i<32; i++)
{
printf("%x ", dump_weight[i]);
if ((i+1)%16==0) printf("\n");
}
*/
//================ Release =================
delete bin;
delete bfc1;
delete bfc2;
delete bfc3;
delete bout;
SAFE_FREE_HOST(image_labels);
SAFE_FREE_HOST(images);
return 0;
}
|
ef24c447a28536c2d838f16170a6ead95a322d3a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @File main.cu
*
* The main file of the project
*
 * Parallel Programming on GPUs (PCG 2020)
 * Project No. 1 (CUDA)
* Login: xpavel34
*/
#include <sys/time.h>
#include <cstdio>
#include <cmath>
#include <sstream>
#include <unistd.h>
#include "nbody.h"
#include "h5Helper.h"
#include "wrappers.cuh"
/**
 * Main routine
* @param argc
* @param argv
* @return
*/
int main(int argc, char **argv) {
// Time measurement
struct timeval t1{}, t2{};
if (argc != 10) {
        printf("Usage: nbody <N> <dt> <steps> <threads/block> <write intensity> <reduction threads> <reduction threads/block> <input> <output>\n");
exit(1);
}
// Number of particles
const int N = std::stoi(argv[1]);
// Length of time step
const float dt = std::stof(argv[2]);
// Number of steps
const size_t steps = std::stoi(argv[3]);
// Number of thread blocks
const int thr_blc = std::stoi(argv[4]);
// Write frequency
int writeFreq = std::stoi(argv[5]);
// number of reduction threads
const int red_thr = std::stoi(argv[6]);
// Number of reduction threads/blocks
const int red_thr_blc = std::stoi(argv[7]);
// Size of the simulation CUDA gird - number of blocks
const size_t simulationGrid = (N + thr_blc - 1) / thr_blc;
// Size of the reduction CUDA grid - number of blocks
const size_t reductionGrid = (red_thr + red_thr_blc - 1) / red_thr_blc;
// Log benchmark setup
printf("N: %d\n", N);
printf("dt: %f\n", dt);
printf("steps: %zu\n", steps);
printf("threads/block: %d\n", thr_blc);
printf("blocks/grid: %lu\n", simulationGrid);
printf("reduction threads/block: %d\n", red_thr_blc);
printf("reduction blocks/grid: %lu\n", reductionGrid);
const size_t recordsNum = (writeFreq > 0) ? (steps + writeFreq - 1) / writeFreq : 0;
writeFreq = (writeFreq > 0) ? writeFreq : 0;
size_t particleCountRounded = roundUp(N, 32);
size_t memberArrayByteSize = particleCountRounded * sizeof(float);
size_t bytesTotal = memberArrayByteSize * t_particles_member_count;
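// All particle attributes live in one pinned (write-combined) host pool laid out as a structure of
// arrays, with the seven member arrays (positions, velocities, weights) placed back to back;
// N is rounded up to a multiple of 32, presumably to keep each member array warp-aligned.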
CudaHostMemoryPool<float> particlesHostPool(bytesTotal, hipHostMallocWriteCombined);
t_particles particles_cpu{
.positionsX = particlesHostPool.data(),
.positionsY = &particlesHostPool.data()[particleCountRounded],
.positionsZ = &particlesHostPool.data()[particleCountRounded * 2],
.velocitiesX = &particlesHostPool.data()[particleCountRounded * 3],
.velocitiesY = &particlesHostPool.data()[particleCountRounded * 4],
.velocitiesZ = &particlesHostPool.data()[particleCountRounded * 5],
.weights = &particlesHostPool.data()[particleCountRounded * 6]
};
MemDesc md(
particles_cpu.positionsX, 1, 0, // Postition in X
particles_cpu.positionsY, 1, 0, // Postition in Y
particles_cpu.positionsZ, 1, 0, // Postition in Z
particles_cpu.velocitiesX, 1, 0, // Velocity in X
particles_cpu.velocitiesY, 1, 0, // Velocity in Y
particles_cpu.velocitiesZ, 1, 0, // Velocity in Z
particles_cpu.weights, 1, 0, // Weight
N, // Number of particles
recordsNum); // Number of records in output file
// Initialisation of helper class and loading of input data
auto outputFile = std::string(argv[9]);
H5Helper h5Helper(argv[8], outputFile, md);
try {
h5Helper.init();
h5Helper.readParticleData();
}
catch (const std::exception &e) {
std::cerr << e.what() << std::endl;
return -1;
}
memberArrayByteSize = particleCountRounded * sizeof(float);
bytesTotal = memberArrayByteSize * t_particles_member_count;
std::vector<CudaDeviceMemoryPool<float>> particleDevicePools;
particleDevicePools.emplace_back(bytesTotal);
particleDevicePools.emplace_back(bytesTotal);
std::vector<t_particles> particles_gpu(2);
for (auto i = 0; i < particleDevicePools.size(); i++) {
particles_gpu[i] = {
.positionsX = particleDevicePools[i].data(),
.positionsY = &particleDevicePools[i].data()[particleCountRounded],
.positionsZ = &particleDevicePools[i].data()[particleCountRounded * 2],
.velocitiesX = &particleDevicePools[i].data()[particleCountRounded * 3],
.velocitiesY = &particleDevicePools[i].data()[particleCountRounded * 4],
.velocitiesZ = &particleDevicePools[i].data()[particleCountRounded * 5],
.weights = &particleDevicePools[i].data()[particleCountRounded * 6]
};
}
CUDA_CHECK(hipMemcpy(particleDevicePools[0].data(), particlesHostPool.data(), particlesHostPool.byteSize,
hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(particleDevicePools[1].data(), particlesHostPool.data(), particlesHostPool.byteSize,
hipMemcpyHostToDevice));
gettimeofday(&t1, 0);
dim3 blockSize(thr_blc);
dim3 gridSize(simulationGrid);
size_t smSizeVelocity = t_particles_member_count * sizeof(float) * blockSize.x;
CudaHostMemoryPool<float4> com_cpu(sizeof(float4), hipHostMallocDefault);
CudaDeviceMemoryPool<float4> com_gpu(sizeof(float4));
CudaDeviceMemoryPool<int> lock_gpu(sizeof(int));
com_gpu.Memset(0);
lock_gpu.Memset(0);
size_t warpsPerBlock = (red_thr_blc + 32 - 1) / 32;
size_t smSizeCOM = 4 * sizeof(float) * warpsPerBlock;
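// Three streams overlap the per-step work: velocity integration, the centre-of-mass reduction and
// the periodic asynchronous device-to-host transfers; the events below enforce ordering between them.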
CudaStream velocityComputeStream, comComputeStream, transferStream;
CudaEvent particleTransferFinished, comTransferFinished;
CudaEvent velocityComputeFinished, comComputeFinished;
size_t recordID = 0;
for (size_t s = 0; s < steps; s++) {
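// Ping-pong buffering: step s reads particle buffer (s & 1) and writes buffer ((s + 1) & 1), so the
// kernel never overwrites positions it is still reading.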
hipLaunchKernelGGL(( calculate_velocity), dim3(gridSize), dim3(blockSize), smSizeVelocity, velocityComputeStream.data(),
particles_gpu[s & 1ul],
particles_gpu[(s + 1) & 1ul],
N,
dt
);
hipLaunchKernelGGL(( centerOfMass), dim3(reductionGrid), dim3(red_thr_blc), smSizeCOM, comComputeStream.data(),
particles_gpu[s & 1ul],
&com_gpu.data()[0].x,
&com_gpu.data()[0].y,
&com_gpu.data()[0].z,
&com_gpu.data()[0].w,
lock_gpu.data(),
N
);
lock_gpu.MemsetAsync(0, comComputeStream.data());
comComputeFinished.Record(comComputeStream); /// Signal finished COM computation
if (writeFreq > 0 && (s % writeFreq == 0)) {
/// Wait for compute from previous iteration to finish
transferStream.WaitEvent(velocityComputeFinished);
/// Transfer current input particle data
CUDA_CHECK(hipMemcpyAsync(particlesHostPool.data(), particleDevicePools[s & 1ul].data(),
particlesHostPool.byteSize,
hipMemcpyDeviceToHost,
transferStream.data()));
particleTransferFinished.Record(transferStream); /// Signal finished velocity transfer
/// Compute must be done before we transfer COM data
transferStream.WaitEvent(comComputeFinished);
CUDA_CHECK(hipMemcpyAsync(com_cpu.data(), com_gpu.data(),
com_cpu.byteSize,
hipMemcpyDeviceToHost,
transferStream.data()));
com_gpu.MemsetAsync(0, transferStream.data());
comTransferFinished.Record(transferStream); /// Signal finished COM transfer
hipEventSynchronize(particleTransferFinished.data());
h5Helper.writeParticleData(recordID);
hipEventSynchronize(comTransferFinished.data());
h5Helper.writeCom(com_cpu.data()->x, com_cpu.data()->y, com_cpu.data()->z, com_cpu.data()->w, recordID);
recordID++;
}
else {
com_gpu.MemsetAsync(0, comComputeStream.data());
}
velocityComputeFinished.Record(velocityComputeStream); /// Signal finished velocity computation
/// Synchronize both compute streams across loop iterations, i.e.,
/// both compute streams must wait for each other to finish current iteration
velocityComputeStream.WaitEvent(comComputeFinished);
comComputeStream.WaitEvent(velocityComputeFinished);
}
/// Calculate COM from the last output data (not done in the previous loop)
CUDA_CHECK(hipDeviceSynchronize());
com_gpu.Memset(0);
lock_gpu.Memset(0);
hipLaunchKernelGGL(( centerOfMass), dim3(reductionGrid), dim3(red_thr_blc), smSizeCOM, 0, particles_gpu[steps & 1ul],
&com_gpu.data()[0].x,
&com_gpu.data()[0].y,
&com_gpu.data()[0].z,
&com_gpu.data()[0].w,
lock_gpu.data(),
N);
CUDA_CHECK(hipDeviceSynchronize());
gettimeofday(&t2, 0);
// Approximate simulation wall time
double t = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000000.0;
printf("Time: %f s\n", t);
if (writeFreq == 0) {
}
CUDA_CHECK(hipMemcpy(particlesHostPool.data(),
particleDevicePools[steps & 1ul].data(),
particlesHostPool.byteSize,
hipMemcpyDeviceToHost));
CUDA_CHECK(hipMemcpy(com_cpu.data(), com_gpu.data(), com_cpu.byteSize, hipMemcpyDeviceToHost));
float4 comOnGPU{
com_cpu.data()->x,
com_cpu.data()->y,
com_cpu.data()->z,
com_cpu.data()->w
};
float4 comOnCPU = centerOfMassCPU(md);
std::cout << "Center of mass on CPU:" << std::endl
<< comOnCPU.x << ", "
<< comOnCPU.y << ", "
<< comOnCPU.z << ", "
<< comOnCPU.w
<< std::endl;
std::cout << "Center of mass on GPU:" << std::endl
<< comOnGPU.x << ", "
<< comOnGPU.y << ", "
<< comOnGPU.z << ", "
<< comOnGPU.w
<< std::endl;
// Writing final values to the file
h5Helper.writeComFinal(comOnGPU.x, comOnGPU.y, comOnGPU.z, comOnGPU.w);
h5Helper.writeParticleDataFinal();
return 0;
}// end of main
//----------------------------------------------------------------------------------------------------------------------
| ef24c447a28536c2d838f16170a6ead95a322d3a.cu | /**
* @File main.cu
*
* The main file of the project
*
 * Parallel Programming on GPUs (PCG 2020)
 * Project no. 1 (cuda)
* Login: xpavel34
*/
#include <sys/time.h>
#include <cstdio>
#include <cmath>
#include <sstream>
#include <unistd.h>
#include "nbody.h"
#include "h5Helper.h"
#include "wrappers.cuh"
/**
* Main rotine
* @param argc
* @param argv
* @return
*/
int main(int argc, char **argv) {
// Time measurement
struct timeval t1{}, t2{};
if (argc != 10) {
printf("Usage: nbody <N> <dt> <steps> <threads/block> <write intesity> <reduction threads> <reduction threads/block> <input> <output>\n");
exit(1);
}
// Number of particles
const int N = std::stoi(argv[1]);
// Length of time step
const float dt = std::stof(argv[2]);
// Number of steps
const size_t steps = std::stoi(argv[3]);
// Number of thread blocks
const int thr_blc = std::stoi(argv[4]);
// Write frequency
int writeFreq = std::stoi(argv[5]);
// number of reduction threads
const int red_thr = std::stoi(argv[6]);
// Number of reduction threads/blocks
const int red_thr_blc = std::stoi(argv[7]);
// Size of the simulation CUDA grid - number of blocks
const size_t simulationGrid = (N + thr_blc - 1) / thr_blc;
// Size of the reduction CUDA grid - number of blocks
const size_t reductionGrid = (red_thr + red_thr_blc - 1) / red_thr_blc;
// Log benchmark setup
printf("N: %d\n", N);
printf("dt: %f\n", dt);
printf("steps: %zu\n", steps);
printf("threads/block: %d\n", thr_blc);
printf("blocks/grid: %lu\n", simulationGrid);
printf("reduction threads/block: %d\n", red_thr_blc);
printf("reduction blocks/grid: %lu\n", reductionGrid);
const size_t recordsNum = (writeFreq > 0) ? (steps + writeFreq - 1) / writeFreq : 0;
writeFreq = (writeFreq > 0) ? writeFreq : 0;
size_t particleCountRounded = roundUp(N, 32);
size_t memberArrayByteSize = particleCountRounded * sizeof(float);
size_t bytesTotal = memberArrayByteSize * t_particles_member_count;
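// All particle attributes live in one pinned (write-combined) host pool laid out as a structure of
// arrays, with the seven member arrays (positions, velocities, weights) placed back to back;
// N is rounded up to a multiple of 32, presumably to keep each member array warp-aligned.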
CudaHostMemoryPool<float> particlesHostPool(bytesTotal, cudaHostAllocWriteCombined);
t_particles particles_cpu{
.positionsX = particlesHostPool.data(),
.positionsY = &particlesHostPool.data()[particleCountRounded],
.positionsZ = &particlesHostPool.data()[particleCountRounded * 2],
.velocitiesX = &particlesHostPool.data()[particleCountRounded * 3],
.velocitiesY = &particlesHostPool.data()[particleCountRounded * 4],
.velocitiesZ = &particlesHostPool.data()[particleCountRounded * 5],
.weights = &particlesHostPool.data()[particleCountRounded * 6]
};
MemDesc md(
particles_cpu.positionsX, 1, 0, // Postition in X
particles_cpu.positionsY, 1, 0, // Postition in Y
particles_cpu.positionsZ, 1, 0, // Postition in Z
particles_cpu.velocitiesX, 1, 0, // Velocity in X
particles_cpu.velocitiesY, 1, 0, // Velocity in Y
particles_cpu.velocitiesZ, 1, 0, // Velocity in Z
particles_cpu.weights, 1, 0, // Weight
N, // Number of particles
recordsNum); // Number of records in output file
// Initialisation of helper class and loading of input data
auto outputFile = std::string(argv[9]);
H5Helper h5Helper(argv[8], outputFile, md);
try {
h5Helper.init();
h5Helper.readParticleData();
}
catch (const std::exception &e) {
std::cerr << e.what() << std::endl;
return -1;
}
memberArrayByteSize = particleCountRounded * sizeof(float);
bytesTotal = memberArrayByteSize * t_particles_member_count;
std::vector<CudaDeviceMemoryPool<float>> particleDevicePools;
particleDevicePools.emplace_back(bytesTotal);
particleDevicePools.emplace_back(bytesTotal);
std::vector<t_particles> particles_gpu(2);
for (auto i = 0; i < particleDevicePools.size(); i++) {
particles_gpu[i] = {
.positionsX = particleDevicePools[i].data(),
.positionsY = &particleDevicePools[i].data()[particleCountRounded],
.positionsZ = &particleDevicePools[i].data()[particleCountRounded * 2],
.velocitiesX = &particleDevicePools[i].data()[particleCountRounded * 3],
.velocitiesY = &particleDevicePools[i].data()[particleCountRounded * 4],
.velocitiesZ = &particleDevicePools[i].data()[particleCountRounded * 5],
.weights = &particleDevicePools[i].data()[particleCountRounded * 6]
};
}
CUDA_CHECK(cudaMemcpy(particleDevicePools[0].data(), particlesHostPool.data(), particlesHostPool.byteSize,
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(particleDevicePools[1].data(), particlesHostPool.data(), particlesHostPool.byteSize,
cudaMemcpyHostToDevice));
gettimeofday(&t1, 0);
dim3 blockSize(thr_blc);
dim3 gridSize(simulationGrid);
size_t smSizeVelocity = t_particles_member_count * sizeof(float) * blockSize.x;
CudaHostMemoryPool<float4> com_cpu(sizeof(float4), cudaHostAllocDefault);
CudaDeviceMemoryPool<float4> com_gpu(sizeof(float4));
CudaDeviceMemoryPool<int> lock_gpu(sizeof(int));
com_gpu.Memset(0);
lock_gpu.Memset(0);
size_t warpsPerBlock = (red_thr_blc + 32 - 1) / 32;
size_t smSizeCOM = 4 * sizeof(float) * warpsPerBlock;
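// Three streams overlap the per-step work: velocity integration, the centre-of-mass reduction and
// the periodic asynchronous device-to-host transfers; the events below enforce ordering between them.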
CudaStream velocityComputeStream, comComputeStream, transferStream;
CudaEvent particleTransferFinished, comTransferFinished;
CudaEvent velocityComputeFinished, comComputeFinished;
size_t recordID = 0;
for (size_t s = 0; s < steps; s++) {
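// Ping-pong buffering: step s reads particle buffer (s & 1) and writes buffer ((s + 1) & 1), so the
// kernel never overwrites positions it is still reading.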
calculate_velocity<<<gridSize, blockSize, smSizeVelocity, velocityComputeStream.data()>>>(
particles_gpu[s & 1ul],
particles_gpu[(s + 1) & 1ul],
N,
dt
);
centerOfMass<<<reductionGrid, red_thr_blc, smSizeCOM, comComputeStream.data()>>>(
particles_gpu[s & 1ul],
&com_gpu.data()[0].x,
&com_gpu.data()[0].y,
&com_gpu.data()[0].z,
&com_gpu.data()[0].w,
lock_gpu.data(),
N
);
lock_gpu.MemsetAsync(0, comComputeStream.data());
comComputeFinished.Record(comComputeStream); /// Signal finished COM computation
if (writeFreq > 0 && (s % writeFreq == 0)) {
/// Wait for compute from previous iteration to finish
transferStream.WaitEvent(velocityComputeFinished);
/// Transfer current input particle data
CUDA_CHECK(cudaMemcpyAsync(particlesHostPool.data(), particleDevicePools[s & 1ul].data(),
particlesHostPool.byteSize,
cudaMemcpyDeviceToHost,
transferStream.data()));
particleTransferFinished.Record(transferStream); /// Signal finished velocity transfer
/// Compute must be done before we transfer COM data
transferStream.WaitEvent(comComputeFinished);
CUDA_CHECK(cudaMemcpyAsync(com_cpu.data(), com_gpu.data(),
com_cpu.byteSize,
cudaMemcpyDeviceToHost,
transferStream.data()));
com_gpu.MemsetAsync(0, transferStream.data());
comTransferFinished.Record(transferStream); /// Signal finished COM transfer
cudaEventSynchronize(particleTransferFinished.data());
h5Helper.writeParticleData(recordID);
cudaEventSynchronize(comTransferFinished.data());
h5Helper.writeCom(com_cpu.data()->x, com_cpu.data()->y, com_cpu.data()->z, com_cpu.data()->w, recordID);
recordID++;
}
else {
com_gpu.MemsetAsync(0, comComputeStream.data());
}
velocityComputeFinished.Record(velocityComputeStream); /// Signal finished velocity computation
/// Synchronize both compute streams across loop iterations, i.e.,
/// both compute streams must wait for each other to finish current iteration
velocityComputeStream.WaitEvent(comComputeFinished);
comComputeStream.WaitEvent(velocityComputeFinished);
}
/// Calculate COM from the last output data (not done in the previous loop)
CUDA_CHECK(cudaDeviceSynchronize());
com_gpu.Memset(0);
lock_gpu.Memset(0);
centerOfMass<<<reductionGrid, red_thr_blc, smSizeCOM>>>(particles_gpu[steps & 1ul],
&com_gpu.data()[0].x,
&com_gpu.data()[0].y,
&com_gpu.data()[0].z,
&com_gpu.data()[0].w,
lock_gpu.data(),
N);
CUDA_CHECK(cudaDeviceSynchronize());
gettimeofday(&t2, 0);
// Approximate simulation wall time
double t = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000000.0;
printf("Time: %f s\n", t);
if (writeFreq == 0) {
}
CUDA_CHECK(cudaMemcpy(particlesHostPool.data(),
particleDevicePools[steps & 1ul].data(),
particlesHostPool.byteSize,
cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaMemcpy(com_cpu.data(), com_gpu.data(), com_cpu.byteSize, cudaMemcpyDeviceToHost));
float4 comOnGPU{
com_cpu.data()->x,
com_cpu.data()->y,
com_cpu.data()->z,
com_cpu.data()->w
};
float4 comOnCPU = centerOfMassCPU(md);
std::cout << "Center of mass on CPU:" << std::endl
<< comOnCPU.x << ", "
<< comOnCPU.y << ", "
<< comOnCPU.z << ", "
<< comOnCPU.w
<< std::endl;
std::cout << "Center of mass on GPU:" << std::endl
<< comOnGPU.x << ", "
<< comOnGPU.y << ", "
<< comOnGPU.z << ", "
<< comOnGPU.w
<< std::endl;
// Writing final values to the file
h5Helper.writeComFinal(comOnGPU.x, comOnGPU.y, comOnGPU.z, comOnGPU.w);
h5Helper.writeParticleDataFinal();
return 0;
}// end of main
//----------------------------------------------------------------------------------------------------------------------
|
ce58bbe255b4218b3ff87865a03bb110af599c6c.hip | // !!! This is a file automatically generated by hipify!!!
#include <gtest/gtest.h>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include "../../../../src/tree/gpu_hist/row_partitioner.cuh"
#include "../../helpers.h"
namespace xgboost {
namespace tree {
void TestSortPosition(const std::vector<int>& position_in, int left_idx,
int right_idx) {
dh::safe_cuda(hipSetDevice(0));
std::vector<int64_t> left_count = {
std::count(position_in.begin(), position_in.end(), left_idx)};
dh::caching_device_vector<int64_t> d_left_count = left_count;
dh::caching_device_vector<int> position = position_in;
dh::caching_device_vector<int> position_out(position.size());
dh::caching_device_vector<RowPartitioner::RowIndexT> ridx(position.size());
thrust::sequence(ridx.begin(), ridx.end());
dh::caching_device_vector<RowPartitioner::RowIndexT> ridx_out(ridx.size());
RowPartitioner rp(0,10);
rp.SortPosition(
common::Span<int>(position.data().get(), position.size()),
common::Span<int>(position_out.data().get(), position_out.size()),
common::Span<RowPartitioner::RowIndexT>(ridx.data().get(), ridx.size()),
common::Span<RowPartitioner::RowIndexT>(ridx_out.data().get(), ridx_out.size()), left_idx,
right_idx, d_left_count.data().get(), nullptr);
thrust::host_vector<int> position_result = position_out;
thrust::host_vector<int> ridx_result = ridx_out;
// Check position is sorted
EXPECT_TRUE(std::is_sorted(position_result.begin(), position_result.end()));
// Check row indices are sorted inside left and right segment
EXPECT_TRUE(
std::is_sorted(ridx_result.begin(), ridx_result.begin() + left_count[0]));
EXPECT_TRUE(
std::is_sorted(ridx_result.begin() + left_count[0], ridx_result.end()));
// Check key value pairs are the same
for (auto i = 0ull; i < ridx_result.size(); i++) {
EXPECT_EQ(position_result[i], position_in[ridx_result[i]]);
}
}
TEST(GpuHist, SortPosition) {
TestSortPosition({1, 2, 1, 2, 1}, 1, 2);
TestSortPosition({1, 1, 1, 1}, 1, 2);
TestSortPosition({2, 2, 2, 2}, 1, 2);
TestSortPosition({1, 2, 1, 2, 3}, 1, 2);
}
void TestUpdatePosition() {
const int kNumRows = 10;
RowPartitioner rp(0, kNumRows);
auto rows = rp.GetRowsHost(0);
EXPECT_EQ(rows.size(), kNumRows);
for (auto i = 0ull; i < kNumRows; i++) {
EXPECT_EQ(rows[i], i);
}
// Send the first five training instances to the right node
// and the second five to the left node
rp.UpdatePosition(0, 1, 2,
[=] __device__(RowPartitioner::RowIndexT ridx) {
if (ridx > 4) {
return 1;
}
else {
return 2;
}
});
rows = rp.GetRowsHost(1);
for (auto r : rows) {
EXPECT_GT(r, 4);
}
rows = rp.GetRowsHost(2);
for (auto r : rows) {
EXPECT_LT(r, 5);
}
// Split the left node again
rp.UpdatePosition(1, 3, 4, [=]__device__(RowPartitioner::RowIndexT ridx)
{
if (ridx < 7) {
return 3;
}
return 4;
});
EXPECT_EQ(rp.GetRows(3).size(), 2);
EXPECT_EQ(rp.GetRows(4).size(), 3);
// Check position is as expected
EXPECT_EQ(rp.GetPositionHost(), std::vector<bst_node_t>({3,3,4,4,4,2,2,2,2,2}));
}
TEST(RowPartitioner, Basic) { TestUpdatePosition(); }
void TestFinalise() {
const int kNumRows = 10;
RowPartitioner rp(0, kNumRows);
rp.FinalisePosition([=]__device__(RowPartitioner::RowIndexT ridx, int position)
{
return 7;
});
auto position = rp.GetPositionHost();
for(auto p:position)
{
EXPECT_EQ(p, 7);
}
}
TEST(RowPartitioner, Finalise) { TestFinalise(); }
void TestIncorrectRow() {
RowPartitioner rp(0, 1);
rp.UpdatePosition(0, 1, 2, [=]__device__ (RowPartitioner::RowIndexT ridx)
{
return 4; // This is not the left branch or the right branch
});
}
TEST(RowPartitioner, IncorrectRow) {
ASSERT_DEATH({ TestIncorrectRow(); },".*");
}
} // namespace tree
} // namespace xgboost
| ce58bbe255b4218b3ff87865a03bb110af599c6c.cu | #include <gtest/gtest.h>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include "../../../../src/tree/gpu_hist/row_partitioner.cuh"
#include "../../helpers.h"
namespace xgboost {
namespace tree {
void TestSortPosition(const std::vector<int>& position_in, int left_idx,
int right_idx) {
dh::safe_cuda(cudaSetDevice(0));
std::vector<int64_t> left_count = {
std::count(position_in.begin(), position_in.end(), left_idx)};
dh::caching_device_vector<int64_t> d_left_count = left_count;
dh::caching_device_vector<int> position = position_in;
dh::caching_device_vector<int> position_out(position.size());
dh::caching_device_vector<RowPartitioner::RowIndexT> ridx(position.size());
thrust::sequence(ridx.begin(), ridx.end());
dh::caching_device_vector<RowPartitioner::RowIndexT> ridx_out(ridx.size());
RowPartitioner rp(0,10);
rp.SortPosition(
common::Span<int>(position.data().get(), position.size()),
common::Span<int>(position_out.data().get(), position_out.size()),
common::Span<RowPartitioner::RowIndexT>(ridx.data().get(), ridx.size()),
common::Span<RowPartitioner::RowIndexT>(ridx_out.data().get(), ridx_out.size()), left_idx,
right_idx, d_left_count.data().get(), nullptr);
thrust::host_vector<int> position_result = position_out;
thrust::host_vector<int> ridx_result = ridx_out;
// Check position is sorted
EXPECT_TRUE(std::is_sorted(position_result.begin(), position_result.end()));
// Check row indices are sorted inside left and right segment
EXPECT_TRUE(
std::is_sorted(ridx_result.begin(), ridx_result.begin() + left_count[0]));
EXPECT_TRUE(
std::is_sorted(ridx_result.begin() + left_count[0], ridx_result.end()));
// Check key value pairs are the same
for (auto i = 0ull; i < ridx_result.size(); i++) {
EXPECT_EQ(position_result[i], position_in[ridx_result[i]]);
}
}
TEST(GpuHist, SortPosition) {
TestSortPosition({1, 2, 1, 2, 1}, 1, 2);
TestSortPosition({1, 1, 1, 1}, 1, 2);
TestSortPosition({2, 2, 2, 2}, 1, 2);
TestSortPosition({1, 2, 1, 2, 3}, 1, 2);
}
void TestUpdatePosition() {
const int kNumRows = 10;
RowPartitioner rp(0, kNumRows);
auto rows = rp.GetRowsHost(0);
EXPECT_EQ(rows.size(), kNumRows);
for (auto i = 0ull; i < kNumRows; i++) {
EXPECT_EQ(rows[i], i);
}
// Send the first five training instances to the right node
// and the second five to the left node
rp.UpdatePosition(0, 1, 2,
[=] __device__(RowPartitioner::RowIndexT ridx) {
if (ridx > 4) {
return 1;
}
else {
return 2;
}
});
rows = rp.GetRowsHost(1);
for (auto r : rows) {
EXPECT_GT(r, 4);
}
rows = rp.GetRowsHost(2);
for (auto r : rows) {
EXPECT_LT(r, 5);
}
// Split the left node again
rp.UpdatePosition(1, 3, 4, [=]__device__(RowPartitioner::RowIndexT ridx)
{
if (ridx < 7) {
return 3;
}
return 4;
});
EXPECT_EQ(rp.GetRows(3).size(), 2);
EXPECT_EQ(rp.GetRows(4).size(), 3);
// Check position is as expected
EXPECT_EQ(rp.GetPositionHost(), std::vector<bst_node_t>({3,3,4,4,4,2,2,2,2,2}));
}
TEST(RowPartitioner, Basic) { TestUpdatePosition(); }
void TestFinalise() {
const int kNumRows = 10;
RowPartitioner rp(0, kNumRows);
rp.FinalisePosition([=]__device__(RowPartitioner::RowIndexT ridx, int position)
{
return 7;
});
auto position = rp.GetPositionHost();
for(auto p:position)
{
EXPECT_EQ(p, 7);
}
}
TEST(RowPartitioner, Finalise) { TestFinalise(); }
void TestIncorrectRow() {
RowPartitioner rp(0, 1);
rp.UpdatePosition(0, 1, 2, [=]__device__ (RowPartitioner::RowIndexT ridx)
{
return 4; // This is not the left branch or the right branch
});
}
TEST(RowPartitioner, IncorrectRow) {
ASSERT_DEATH({ TestIncorrectRow(); },".*");
}
} // namespace tree
} // namespace xgboost
|
d4c800404a6f075d8c228917f8c477975a4afca5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ static void convertToRGBTestKernel(const uint16_t *pV210, uint8_t *tt, int nSrcWidth, int nDstWidth, int nDstHeight) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
uint4 pF;
if (tid < (nSrcWidth / 8) && tidd < nDstHeight) {
int j = tidd * nSrcWidth;
int k = tid * 8;
pF.x = (uint32_t)pV210[j + k + 0] + ((uint32_t)pV210[j + k + 1] << 16);
pF.y = (uint32_t)pV210[j + k + 2] + ((uint32_t)pV210[j + k + 3] << 16);
pF.z = (uint32_t)pV210[j + k + 4] + ((uint32_t)pV210[j + k + 5] << 16);
pF.w = (uint32_t)pV210[j + k + 6] + ((uint32_t)pV210[j + k + 7] << 16);
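// Each group of four packed 32-bit words carries twelve 10-bit components (six Y, three U, three V),
// i.e. six 4:2:2 pixels; the masks/shifts below unpack them, luma is pre-scaled by 1000 for the
// fixed-point YUV->RGB math, and the final * 0.249 / 1000 rescales the 10-bit result to 8-bit RGB.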
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10) * 1000;
u0 = (uint32_t)(pF.x & 0x000003FF);
y2 = (uint32_t)((pF.y & 0x3FF00000) >> 20) * 1000;
u1 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
y1 = (uint32_t)(pF.y & 0x000003FF) * 1000;
u2 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
y3 = (uint32_t)((pF.z & 0x000FFC00) >> 10) * 1000;
v1 = (uint32_t)(pF.z & 0x000003FF);
y5 = (uint32_t)((pF.w & 0x3FF00000) >> 20) * 1000;
v2 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y4 = (uint32_t)(pF.w & 0x000003FF) * 1000;
k = tid * 18;
j *= 9;
j /= 4;
int r = 1407 * v0 - 720384, g = 716 * v0 + 345 * u0 - 543232, b = 1779 * u0 - 910848;
tt[j + k + 0] = (y0 + r) * 0.249 / 1000;
tt[j + k + 1] = (y0 - g) * 0.249 / 1000;
tt[j + k + 2] = (y0 + b) * 0.249 / 1000;
tt[j + k + 3] = (y1 + r) * 0.249 / 1000;
tt[j + k + 4] = (y1 - g) * 0.249 / 1000;
tt[j + k + 5] = (y1 + b) * 0.249 / 1000;
r = 1407 * v1 - 720384, g = 716 * v1 + 345 * u1 - 543232, b = 1779 * u1 - 910848;
tt[j + k + 6] = (y2 + r) * 0.249 / 1000;
tt[j + k + 7] = (y2 - g) * 0.249 / 1000;
tt[j + k + 8] = (y2 + b) * 0.249 / 1000;
tt[j + k + 9] = (y3 + r) * 0.249 / 1000;
tt[j + k + 10] = (y3 - g) * 0.249 / 1000;
tt[j + k + 11] = (y3 + b) * 0.249 / 1000;
r = 1407 * v2 - 720384, g = 716 * v2 + 345 * u2 - 543232, b = 1779 * u2 - 910848;
tt[j + k + 12] = (y4 + r) * 0.249 / 1000;
tt[j + k + 13] = (y4 - g) * 0.249 / 1000;
tt[j + k + 14] = (y4 + b) * 0.249 / 1000;
tt[j + k + 15] = (y5 + r) * 0.249 / 1000;
tt[j + k + 16] = (y5 - g) * 0.249 / 1000;
tt[j + k + 17] = (y5 + b) * 0.249 / 1000;
}
} | d4c800404a6f075d8c228917f8c477975a4afca5.cu | #include "includes.h"
__global__ static void convertToRGBTestKernel(const uint16_t *pV210, uint8_t *tt, int nSrcWidth, int nDstWidth, int nDstHeight) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
uint4 pF;
if (tid < (nSrcWidth / 8) && tidd < nDstHeight) {
int j = tidd * nSrcWidth;
int k = tid * 8;
pF.x = (uint32_t)pV210[j + k + 0] + ((uint32_t)pV210[j + k + 1] << 16);
pF.y = (uint32_t)pV210[j + k + 2] + ((uint32_t)pV210[j + k + 3] << 16);
pF.z = (uint32_t)pV210[j + k + 4] + ((uint32_t)pV210[j + k + 5] << 16);
pF.w = (uint32_t)pV210[j + k + 6] + ((uint32_t)pV210[j + k + 7] << 16);
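// Each group of four packed 32-bit words carries twelve 10-bit components (six Y, three U, three V),
// i.e. six 4:2:2 pixels; the masks/shifts below unpack them, luma is pre-scaled by 1000 for the
// fixed-point YUV->RGB math, and the final * 0.249 / 1000 rescales the 10-bit result to 8-bit RGB.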
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10) * 1000;
u0 = (uint32_t)(pF.x & 0x000003FF);
y2 = (uint32_t)((pF.y & 0x3FF00000) >> 20) * 1000;
u1 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
y1 = (uint32_t)(pF.y & 0x000003FF) * 1000;
u2 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
y3 = (uint32_t)((pF.z & 0x000FFC00) >> 10) * 1000;
v1 = (uint32_t)(pF.z & 0x000003FF);
y5 = (uint32_t)((pF.w & 0x3FF00000) >> 20) * 1000;
v2 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y4 = (uint32_t)(pF.w & 0x000003FF) * 1000;
k = tid * 18;
j *= 9;
j /= 4;
int r = 1407 * v0 - 720384, g = 716 * v0 + 345 * u0 - 543232, b = 1779 * u0 - 910848;
tt[j + k + 0] = (y0 + r) * 0.249 / 1000;
tt[j + k + 1] = (y0 - g) * 0.249 / 1000;
tt[j + k + 2] = (y0 + b) * 0.249 / 1000;
tt[j + k + 3] = (y1 + r) * 0.249 / 1000;
tt[j + k + 4] = (y1 - g) * 0.249 / 1000;
tt[j + k + 5] = (y1 + b) * 0.249 / 1000;
r = 1407 * v1 - 720384, g = 716 * v1 + 345 * u1 - 543232, b = 1779 * u1 - 910848;
tt[j + k + 6] = (y2 + r) * 0.249 / 1000;
tt[j + k + 7] = (y2 - g) * 0.249 / 1000;
tt[j + k + 8] = (y2 + b) * 0.249 / 1000;
tt[j + k + 9] = (y3 + r) * 0.249 / 1000;
tt[j + k + 10] = (y3 - g) * 0.249 / 1000;
tt[j + k + 11] = (y3 + b) * 0.249 / 1000;
r = 1407 * v2 - 720384, g = 716 * v2 + 345 * u2 - 543232, b = 1779 * u2 - 910848;
tt[j + k + 12] = (y4 + r) * 0.249 / 1000;
tt[j + k + 13] = (y4 - g) * 0.249 / 1000;
tt[j + k + 14] = (y4 + b) * 0.249 / 1000;
tt[j + k + 15] = (y5 + r) * 0.249 / 1000;
tt[j + k + 16] = (y5 - g) * 0.249 / 1000;
tt[j + k + 17] = (y5 + b) * 0.249 / 1000;
}
} |
d5fcc11bcbbfcaa77737b1c79322f9515f388eb2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "AntiPenetrationWallCUDA.cuh"
extern "C"
{
__host__ void AntiPenetrationWallKernelBindTextures( float4 const* pdLineStart,
float4 const* pdLineEnd,
float4 const* pdLineNormal,
uint const numLines
);
__host__ void AntiPenetrationWallKernelUnbindTextures( void );
__global__ void AntiPenetrationWallKernel( float4 const* pdPosition,
float4 * pdDirection,
float const* pdSpeed,
uint const* pdKNLIndices, // Indices of the K Nearest line segments...
uint const k, // Number of lines in KNL.
float const elapsedTime,
uint const numAgents,
uint const numLines,
uint * pdAppliedKernels
);
}
using namespace OpenSteer;
AntiPenetrationWALLCUDA::AntiPenetrationWALLCUDA( AgentGroup * pAgentGroup, KNNData * pKNNData, WallGroup * pWallGroup, float const elapsedTime, uint const doNotApplyWith )
: AbstractCUDAKernel( pAgentGroup, 0.f, doNotApplyWith ),
m_pKNNData( pKNNData ),
m_pWallGroup( pWallGroup ),
m_fElapsedTime( elapsedTime )
{
}
void AntiPenetrationWALLCUDA::init( void )
{
// Nothing to do.
}
void AntiPenetrationWALLCUDA::run( void )
{
dim3 grid = gridDim();
dim3 block = blockDim();
float4 const* pdPosition = m_pAgentGroupData->pdPosition();
float4 * pdDirection = m_pAgentGroupData->pdDirection();
float const* pdSpeed = m_pAgentGroupData->pdSpeed();
uint const& numAgents = getNumAgents();
uint const* pdKNLIndices = m_pKNNData->pdKNNIndices();
uint const& k = m_pKNNData->k();
uint * pdAppliedKernels = m_pAgentGroupData->pdAppliedKernels();
float4 const* pdLineStart = m_pWallGroup->GetWallGroupData().pdLineStart();
float4 const* pdLineEnd = m_pWallGroup->GetWallGroupData().pdLineEnd();
float4 const* pdLineNormal = m_pWallGroup->GetWallGroupData().pdLineNormal();
uint const& numLines = m_pWallGroup->Size();
size_t const shMemSize = k * THREADSPERBLOCK * sizeof(uint);
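// One uint of shared memory per thread for each of the K nearest lines, presumably so the kernel can
// stage each agent's KNL indices on-chip.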
// Bind the textures.
AntiPenetrationWallKernelBindTextures( pdLineStart, pdLineEnd, pdLineNormal, numLines );
hipLaunchKernelGGL(( AntiPenetrationWallKernel), dim3(grid), dim3(block), shMemSize, 0, pdPosition,
pdDirection,
pdSpeed,
pdKNLIndices,
k,
m_fElapsedTime,
numAgents,
numLines,
pdAppliedKernels
);
cutilCheckMsg( "AntiPenetrationWallKernel failed." );
//CUDA_SAFE_CALL( hipDeviceSynchronize() );
// Unbind the textures.
AntiPenetrationWallKernelUnbindTextures();
}
void AntiPenetrationWALLCUDA::close( void )
{
// Agent group data may have changed.
m_pAgentGroup->SetSyncHost();
}
| d5fcc11bcbbfcaa77737b1c79322f9515f388eb2.cu | #include "AntiPenetrationWallCUDA.cuh"
extern "C"
{
__host__ void AntiPenetrationWallKernelBindTextures( float4 const* pdLineStart,
float4 const* pdLineEnd,
float4 const* pdLineNormal,
uint const numLines
);
__host__ void AntiPenetrationWallKernelUnbindTextures( void );
__global__ void AntiPenetrationWallKernel( float4 const* pdPosition,
float4 * pdDirection,
float const* pdSpeed,
uint const* pdKNLIndices, // Indices of the K Nearest line segments...
uint const k, // Number of lines in KNL.
float const elapsedTime,
uint const numAgents,
uint const numLines,
uint * pdAppliedKernels
);
}
using namespace OpenSteer;
AntiPenetrationWALLCUDA::AntiPenetrationWALLCUDA( AgentGroup * pAgentGroup, KNNData * pKNNData, WallGroup * pWallGroup, float const elapsedTime, uint const doNotApplyWith )
: AbstractCUDAKernel( pAgentGroup, 0.f, doNotApplyWith ),
m_pKNNData( pKNNData ),
m_pWallGroup( pWallGroup ),
m_fElapsedTime( elapsedTime )
{
}
void AntiPenetrationWALLCUDA::init( void )
{
// Nothing to do.
}
void AntiPenetrationWALLCUDA::run( void )
{
dim3 grid = gridDim();
dim3 block = blockDim();
float4 const* pdPosition = m_pAgentGroupData->pdPosition();
float4 * pdDirection = m_pAgentGroupData->pdDirection();
float const* pdSpeed = m_pAgentGroupData->pdSpeed();
uint const& numAgents = getNumAgents();
uint const* pdKNLIndices = m_pKNNData->pdKNNIndices();
uint const& k = m_pKNNData->k();
uint * pdAppliedKernels = m_pAgentGroupData->pdAppliedKernels();
float4 const* pdLineStart = m_pWallGroup->GetWallGroupData().pdLineStart();
float4 const* pdLineEnd = m_pWallGroup->GetWallGroupData().pdLineEnd();
float4 const* pdLineNormal = m_pWallGroup->GetWallGroupData().pdLineNormal();
uint const& numLines = m_pWallGroup->Size();
size_t const shMemSize = k * THREADSPERBLOCK * sizeof(uint);
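// One uint of shared memory per thread for each of the K nearest lines, presumably so the kernel can
// stage each agent's KNL indices on-chip.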
// Bind the textures.
AntiPenetrationWallKernelBindTextures( pdLineStart, pdLineEnd, pdLineNormal, numLines );
AntiPenetrationWallKernel<<< grid, block, shMemSize>>>( pdPosition,
pdDirection,
pdSpeed,
pdKNLIndices,
k,
m_fElapsedTime,
numAgents,
numLines,
pdAppliedKernels
);
cutilCheckMsg( "AntiPenetrationWallKernel failed." );
//CUDA_SAFE_CALL( cudaThreadSynchronize() );
// Unbind the textures.
AntiPenetrationWallKernelUnbindTextures();
}
void AntiPenetrationWALLCUDA::close( void )
{
// Agent group data may have changed.
m_pAgentGroup->SetSyncHost();
}
|
ae5aadf16e3e97de57d1a6ce3763d6dee1228555.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2016 by Contributors
* \file q_fully_connected.cu
* \brief Quantized FC operator
* \author HPI-DeepLearning
*/
#include "./q_fully_connected-inl.h"
#include <mshadow/tensor.h>
#include "./xnor_kernels.h"
namespace mshadow {
namespace cuda {
/*
* m: batch size
* n: input_dim e.g. 1024
* k: hidden_num e.g. 1000
*/
inline void _BinaryInferenceFullyConnectedForward(int m, int n, int k,
const Tensor<gpu, 2, float> &data,
Tensor<gpu, 1, float> &workspace,
mxnet::op::xnor_cpu::BINARY_WORD* wmat_binarized,
Tensor<gpu, 2, float> &out) {
CHECK_EQ(workspace.shape_.Size() * sizeof(workspace[0]) * CHAR_BIT, n * m);
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
//set memory
float *fA = data.dptr_;
// float *fB = wmat.dptr_;
float *fC = out.dptr_;
xnor_cuda::BINARY_WORD* binary_row = (xnor_cuda::BINARY_WORD*) workspace.dptr_;
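// The workspace is reinterpreted as packed BINARY_WORDs: concatenate_rows_kernel reduces each float
// activation to a single bit (m x n floats -> m x n/32 words) so xnor_gemm can operate on bit-packed
// operands.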
//concatenates matrix (m x n) -> (m x n/32)
int threads_per_block = xnor_cuda::get_next_block_dim(m*n/xnor_cuda::BITS_PER_BINARY_WORD);
dim3 conc_block(threads_per_block, 1, 1);
dim3 conc_grid(m*n/(threads_per_block*xnor_cuda::BITS_PER_BINARY_WORD)+1,1);
hipLaunchKernelGGL(( xnor_cuda::concatenate_rows_kernel), dim3(conc_grid), dim3(conc_block), 0, stream, fA, binary_row, m*n/xnor_cuda::BITS_PER_BINARY_WORD);
//get block size
threads_per_block = xnor_cuda::get_next_block_dim(m, n/xnor_cuda::BITS_PER_BINARY_WORD, k);
// Shared memory used to store Asub and Bsub respectively
int memsize = threads_per_block*threads_per_block*sizeof(xnor_cuda::BINARY_WORD)*2;
//perform xnor gemm
dim3 block(threads_per_block, threads_per_block);
dim3 grid(k/threads_per_block + 1, m/threads_per_block + 1);
hipLaunchKernelGGL(( xnor_cuda::xnor_gemm), dim3(grid), dim3(block), memsize, stream, binary_row, (xnor_cuda::BINARY_WORD*)wmat_binarized, fC,
m, n/xnor_cuda::BITS_PER_BINARY_WORD, k,
threads_per_block);
hipDeviceSynchronize();
}
} // namespace cuda
inline void QFullyConnectedForward(int m, int n, int k,
const Tensor<gpu, 2, float> &data,
Tensor<gpu, 1, float> &workspace,
mxnet::op::xnor_cpu::BINARY_WORD* wmat_binarized,
Tensor<gpu, 2, float> &out) {
cuda::_BinaryInferenceFullyConnectedForward(m, n, k, data, workspace, wmat_binarized, out);
}
inline void QFullyConnectedForward(int m, int n, int k,
const Tensor<gpu, 2, float> &data,
Tensor<gpu, 1, float> &workspace,
const Tensor<gpu, 2, float> &wmat,
Tensor<gpu, 2, float> &out) {
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
float *fB = wmat.dptr_; //note that the weight matrix 'wmat' passed in here should already be the transposed weights (w.T).
mxnet::op::xnor_cpu::BINARY_WORD *wmat_binarized;
hipMalloc(&wmat_binarized, n*k/xnor_cuda::BITS_PER_BINARY_WORD*sizeof(mxnet::op::xnor_cpu::BINARY_WORD));
//concatenates matrix (n x k) -> (n/32 x k)
int threads_per_block = xnor_cuda::get_next_block_dim(k);
int blocks_per_grid = k / threads_per_block + 1;
dim3 conc_block(threads_per_block,1,1);
dim3 conc_grid(blocks_per_grid,1);
hipLaunchKernelGGL(( xnor_cuda::concatenate_cols_kernel), dim3(conc_grid), dim3(conc_block), 0, stream, fB, (xnor_cuda::BINARY_WORD*)wmat_binarized, n, k);
hipDeviceSynchronize();
cuda::_BinaryInferenceFullyConnectedForward(m, n, k, data, workspace, wmat_binarized, out);
hipFree(wmat_binarized);
}
template<typename DType>
inline void QFullyConnectedForward(int m, int n, int k,
const Tensor<gpu, 2, DType> &data,
Tensor<gpu, 1, DType> &workspace,
mxnet::op::xnor_cpu::BINARY_WORD* wmat_binarized,
Tensor<gpu, 2, DType> &out) {
CHECK(false) << "only float supported";
}
template<typename DType>
inline void QFullyConnectedForward(int m, int n, int k,
const Tensor<gpu, 2, DType> &data,
Tensor<gpu, 1, DType> &workspace,
const Tensor<gpu, 2, DType> &wmat,
Tensor<gpu, 2, DType> &out) {
CHECK(false) << "only float supported";
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(QFullyConnectedParam param, int dtype,
std::vector<TShape> *in_shape,
std::vector<TShape> *out_shape,
Context ctx) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new QFullyConnectedOp<gpu, DType>(param);
})
return op;
}
} // namespace op
} // namespace mxnet
| ae5aadf16e3e97de57d1a6ce3763d6dee1228555.cu | /*!
* Copyright (c) 2016 by Contributors
* \file q_fully_connected.cu
* \brief Quantized FC operator
* \author HPI-DeepLearning
*/
#include "./q_fully_connected-inl.h"
#include <mshadow/tensor.h>
#include "./xnor_kernels.h"
namespace mshadow {
namespace cuda {
/*
* m: batch size
* n: input_dim e.g. 1024
* k: hidden_num e.g. 1000
*/
inline void _BinaryInferenceFullyConnectedForward(int m, int n, int k,
const Tensor<gpu, 2, float> &data,
Tensor<gpu, 1, float> &workspace,
mxnet::op::xnor_cpu::BINARY_WORD* wmat_binarized,
Tensor<gpu, 2, float> &out) {
CHECK_EQ(workspace.shape_.Size() * sizeof(workspace[0]) * CHAR_BIT, n * m);
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
//set memory
float *fA = data.dptr_;
// float *fB = wmat.dptr_;
float *fC = out.dptr_;
xnor_cuda::BINARY_WORD* binary_row = (xnor_cuda::BINARY_WORD*) workspace.dptr_;
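// The workspace is reinterpreted as packed BINARY_WORDs: concatenate_rows_kernel reduces each float
// activation to a single bit (m x n floats -> m x n/32 words) so xnor_gemm can operate on bit-packed
// operands.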
//concatenates matrix (m x n) -> (m x n/32)
int threads_per_block = xnor_cuda::get_next_block_dim(m*n/xnor_cuda::BITS_PER_BINARY_WORD);
dim3 conc_block(threads_per_block, 1, 1);
dim3 conc_grid(m*n/(threads_per_block*xnor_cuda::BITS_PER_BINARY_WORD)+1,1);
xnor_cuda::concatenate_rows_kernel<<<conc_grid, conc_block, 0, stream>>>(fA, binary_row, m*n/xnor_cuda::BITS_PER_BINARY_WORD);
//get block size
threads_per_block = xnor_cuda::get_next_block_dim(m, n/xnor_cuda::BITS_PER_BINARY_WORD, k);
// Shared memory used to store Asub and Bsub respectively
int memsize = threads_per_block*threads_per_block*sizeof(xnor_cuda::BINARY_WORD)*2;
//perform xnor gemm
dim3 block(threads_per_block, threads_per_block);
dim3 grid(k/threads_per_block + 1, m/threads_per_block + 1);
xnor_cuda::xnor_gemm<<<grid, block, memsize, stream>>>(binary_row, (xnor_cuda::BINARY_WORD*)wmat_binarized, fC,
m, n/xnor_cuda::BITS_PER_BINARY_WORD, k,
threads_per_block);
cudaDeviceSynchronize();
}
} // namespace cuda
inline void QFullyConnectedForward(int m, int n, int k,
const Tensor<gpu, 2, float> &data,
Tensor<gpu, 1, float> &workspace,
mxnet::op::xnor_cpu::BINARY_WORD* wmat_binarized,
Tensor<gpu, 2, float> &out) {
cuda::_BinaryInferenceFullyConnectedForward(m, n, k, data, workspace, wmat_binarized, out);
}
inline void QFullyConnectedForward(int m, int n, int k,
const Tensor<gpu, 2, float> &data,
Tensor<gpu, 1, float> &workspace,
const Tensor<gpu, 2, float> &wmat,
Tensor<gpu, 2, float> &out) {
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
float *fB = wmat.dptr_; //note that the weight matrix 'wmat' passed in here should already be the transposed weights (w.T).
mxnet::op::xnor_cpu::BINARY_WORD *wmat_binarized;
cudaMalloc(&wmat_binarized, n*k/xnor_cuda::BITS_PER_BINARY_WORD*sizeof(mxnet::op::xnor_cpu::BINARY_WORD));
//concatenates matrix (n x k) -> (n/32 x k)
int threads_per_block = xnor_cuda::get_next_block_dim(k);
int blocks_per_grid = k / threads_per_block + 1;
dim3 conc_block(threads_per_block,1,1);
dim3 conc_grid(blocks_per_grid,1);
xnor_cuda::concatenate_cols_kernel<<<conc_grid, conc_block, 0, stream>>>(fB, (xnor_cuda::BINARY_WORD*)wmat_binarized, n, k);
cudaDeviceSynchronize();
cuda::_BinaryInferenceFullyConnectedForward(m, n, k, data, workspace, wmat_binarized, out);
cudaFree(wmat_binarized);
}
template<typename DType>
inline void QFullyConnectedForward(int m, int n, int k,
const Tensor<gpu, 2, DType> &data,
Tensor<gpu, 1, DType> &workspace,
mxnet::op::xnor_cpu::BINARY_WORD* wmat_binarized,
Tensor<gpu, 2, DType> &out) {
CHECK(false) << "only float supported";
}
template<typename DType>
inline void QFullyConnectedForward(int m, int n, int k,
const Tensor<gpu, 2, DType> &data,
Tensor<gpu, 1, DType> &workspace,
const Tensor<gpu, 2, DType> &wmat,
Tensor<gpu, 2, DType> &out) {
CHECK(false) << "only float supported";
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(QFullyConnectedParam param, int dtype,
std::vector<TShape> *in_shape,
std::vector<TShape> *out_shape,
Context ctx) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new QFullyConnectedOp<gpu, DType>(param);
})
return op;
}
} // namespace op
} // namespace mxnet
|
ea6fb4df296beca12d46ad8abf78bee011832c50.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "PBD_Basic.cuh"
#include"CCD_Basic.h"
KERNEL_FUNC float Distance(float3 p1, float3 p2)
{
return powf(powf((p1.x - p2.x), 2) + powf((p1.y - p2.y), 2) + powf((p1.z - p2.z), 2), 0.5);
}
// ----------------------------------------------------------------------------------------
// ConstraintPBD
void ConstraintPBD::InitDistanceConstr(BufferVector3f& meshPosBuffer, float stiffness, int resY, int resX)
{
// cout << __FUNCTION__ << " resolution:" << resY << "----" << resX << std::endl;
InitDistanceIndices(resY, resX);
InitDistanceInfo(meshPosBuffer, stiffness);
}
void ConstraintPBD::InitDistanceIndices(int resY, int resX)
{
int num = resY * resX;
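// Build the distance-constraint edge list over the resY x resX grid: for each vertex a horizontal
// edge to its right neighbour, a vertical edge to the row below and one down-left diagonal, with the
// first/last columns and the last row handled as special cases.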
for (int i = 0; i < num - resX; i++)
{
if (i % resX == 0)
{
topol.indices.m_Data.push_back(i);
topol.indices.m_Data.push_back(i + 1);
topol.indices.m_Data.push_back(i);
topol.indices.m_Data.push_back(i + resX);
}
else if (i % resX == resX - 1)
{
topol.indices.m_Data.push_back(i);
topol.indices.m_Data.push_back(i + resX);
topol.indices.m_Data.push_back(i);
topol.indices.m_Data.push_back(i + resX - 1);
}
else
{
topol.indices.m_Data.push_back(i);
topol.indices.m_Data.push_back(i + 1);
topol.indices.m_Data.push_back(i);
topol.indices.m_Data.push_back(i + resX);
topol.indices.m_Data.push_back(i);
topol.indices.m_Data.push_back(i + resX - 1);
}
}
for (int i = num - resX; i < num - 1; ++i)
{
topol.indices.m_Data.push_back(i);
topol.indices.m_Data.push_back(i + 1);
}
}
void ConstraintPBD::InitDistanceInfo(BufferVector3f& meshPosBuffer, float stiffness)
{
//
// constraint allocation
//
int count = 2;
int prims = topol.indices.GetSize() / count;
//cout << __FUNCTION__ <<" prims:"<< prims << std::endl;
for (int i = 0; i < prims; i++)
{
// init primList & sortedPrimID
int2 p;
p.x = i * count;
p.y = count;
topol.primList.m_Data.push_back(p);
sortedPrimId.m_Data.push_back(i); // WARNING: sortedPrimId is just the identity mapping for now (no collision constraints yet)
// init stiffness
stiffnessBuffer.m_Data.push_back(stiffness);
// init rest length
int i0 = topol.indices.m_Data[p.x];
int i1 = topol.indices.m_Data[p.x + 1];
float d = Distance(meshPosBuffer.m_Data[i0], meshPosBuffer.m_Data[i1]);
restLengthBuffer.m_Data.push_back(d);
// init constraint type
constraintType.m_Data.push_back(DISTANCE);
// init color
color.m_Data.push_back(-1);
prdColor.m_Data.push_back(-1);
}
/*
for (int a = 0; a < 2; a++)
{
int2 p;
p.x = indices.GetSize();
p.y = 1;
constrPBDBuffer.color.m_Data.push_back(-1);
constrPBDBuffer.prdColor.m_Data.push_back(-1);
constrPBDBuffer.sortedColor.m_Data.push_back(-1);
constrPBDBuffer.constraintType.m_Data.push_back(ANCHOR);
constrPBDBuffer.stiffness.m_Data.push_back(1.0);
indices.m_Data.push_back(a == 0 ? 0 : resY - 1);
primList.m_Data.push_back(p);
sortedPrimID.m_Data.push_back(prims);
std::cout << "primList:" << primList.GetSize() << std::endl;
std::cout << "indices:" << indices.m_Data[indices.GetSize()] << std::endl;
}
*/
}
void ConstraintPBD::InitBendingConstr()
{
//todo
//init restAngleBuffer;
/*
for (auto edge : edge2primMap)
{
//edge2primMap numEdge*2
//
// edge 2 prim
// int2 List;
}
*/
}
void ConstraintPBD::InitAnchorConstr(BufferVector3f& meshPosBuffer, float stiffness, int resY)
{
int currPrimSize = sortedPrimId.GetSize();
// std::cout << __FUNCTION__ << std::endl;
for (int a = 0; a < 2; a++)
{
int idx = (a == 0 ? 0 : resY - 1);
// init primList
int2 p;
p.x = topol.indices.GetSize();
p.y = 1;
topol.primList.m_Data.push_back(p);
// init indices
topol.indices.m_Data.push_back(idx);
// init restPosBuffer
restPosBuffer.m_Data.push_back(meshPosBuffer.m_Data[idx]);
// init stiffnessBuffer
stiffnessBuffer.m_Data.push_back(stiffness);
// init constraintType
constraintType.m_Data.push_back(ANCHOR);
//init color prdColor sortedPrimID
color.m_Data.push_back(stiffness);
prdColor.m_Data.push_back(stiffness);
sortedPrimId.m_Data.push_back(currPrimSize + a);
/*std::cout << "primList:" << primList.GetSize() << std::endl;
std::cout << "indices:" << indices.m_Data[indices.GetSize()] << std::endl;*/
}
}
void ConstraintPBD::GenePoint2PrimsMap(Topology topol)
{
auto primList = &(topol.primList);
auto indices = &(topol.indices);
for (int primId = 0; primId < primList->GetSize(); ++primId)
{
int2 currPrim = primList->m_Data[primId];
for (int i = 0; i < currPrim.y; ++i)
{
Point2PrimsMap[indices->m_Data[currPrim.x + i]].push_back(primId);
}
}
// printf("generated point2prims map\n");
}
void ConstraintPBD::GenePrim2PrimsMap(Topology topol)
{
auto primList = &(topol.primList);
auto indices = &(topol.indices);
for (int primId = 0; primId < primList->GetSize(); ++primId)
{
//std::set<int> linkedPrimsSet;
std::vector<int> linkedPrimsSet;
int nptPrims = primList->m_Data[primId].y;
for (int ptId = 0; ptId < nptPrims; ++ptId)
{
int currPtId = indices->m_Data[primList->m_Data[primId].x + ptId];
// printf("primId: %d; ", primId);
auto linkedPrims = Point2PrimsMap[currPtId];
// printf("linked primtive id: ");
for (int nlp = 0; nlp < linkedPrims.size(); nlp++)
{
int linkPrimId = linkedPrims[nlp];
if (linkPrimId == primId)
continue;
linkedPrimsSet.push_back(linkPrimId);
}
}
int startIdx = Prim2PrimsMap.indices.GetSize();
Prim2PrimsMap.indices.m_Data.insert(
std::end(Prim2PrimsMap.indices.m_Data),
std::begin(linkedPrimsSet),
std::end(linkedPrimsSet));
Prim2PrimsMap.startNumList.m_Data.push_back(make_int2(startIdx, linkedPrimsSet.size()));
}
//printf("generated prim2prims map\n");
}
void ConstraintPBD::AssignColorsCPU()
{
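// Greedy speculative coloring: every uncolored constraint collects its neighbours' colors into a
// bitmask (FORBIDBITS colors per pass) and takes the first free color; neighbours that speculatively
// picked the same color are sorted out afterwards in ResolveConflictsCPU.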
auto p2pIndices = &(Prim2PrimsMap.indices);
auto p2pStartNumList = &(Prim2PrimsMap.startNumList);
for (int idx = 0; idx < topol.primList.GetSize(); idx++)
{
if (idx == 0)
detectConflict = 0;
if (color.m_Data[idx] >= 0)
continue;
int nidx = p2pStartNumList->m_Data[idx].x;
//int nlast = p2pStartNumList->m_Data[idx].x + p2pStartNumList->m_Data[idx].y;
int nlast = p2pStartNumList->m_Data[idx].x + p2pStartNumList->m_Data[idx].y;
int c = -1, offset = 0;
while (c < 0)
{
// Bit flag to store seen neighbor colors.
unsigned long forbidden = 0;
for (int i = nidx; i < nlast; ++i)
{
int n = p2pIndices->m_Data[i];
int nc = color.m_Data[n] - offset;
if (nc >= 0 && nc < FORBIDBITS)
forbidden |= (1ul << nc);
}
// Check if there's an open color in the current bitrange.
if (forbidden ^ MAXFORBID)
{
unsigned long x = forbidden;
c = offset;
// Find position of first zero bit.
x = (~x) & (x + 1);
// Color is log2(x)
while (x > 1)
{
x /= 2;
c++;
}
}
else
{
// Otherwise we need to try again with the next range of colors.
offset += FORBIDBITS;
}
}
// Record speculative coloring.
prdColor.m_Data[idx] = c;
}
}
void ConstraintPBD::ResolveConflictsCPU()
{
auto primList = &(topol.primList);
auto p2pIndices = &(Prim2PrimsMap.indices);
auto p2pStartNumList = &(Prim2PrimsMap.startNumList);
for (int idx = 0; idx < primList->GetSize(); ++idx)
{
// Nothing to do if already colored.
if (color.m_Data[idx] >= 0)
continue;
int nidx = p2pStartNumList->m_Data[idx].x;
int nlast = p2pStartNumList->m_Data[idx].x + p2pStartNumList->m_Data[idx].y;
int c = prdColor.m_Data[idx];
//int c = newcolor[idx];
int conflict = 0;
int npt = primList->m_Data[idx].y;
for (int i = nidx; i < nlast; ++i)
{
int n = p2pIndices->m_Data[i];
int pc = prdColor.m_Data[n];
// Check for conflict.
if (pc == c)
{
int nnpt = primList->m_Data[n].y;
// Resolution gives preference to primitives with more points,
// otherwise the one that comes first.
// (NOTE: We color in fewer iterations if we prioritize by number
// of graph neighbors, but then assuming we process prims by color,
// we're usually farther away from original point order leading to many
// cache misses and slower downstream processing.)
if (nnpt > npt ||
(nnpt == npt && n < idx))
{
conflict = 1;
break;
}
}
}
// If there's a conflict then reset sizes for more work,
// otherwise accept speculative color.
if (conflict)
{
detectConflict = primList->GetSize();
break;
}
else
color.m_Data[idx] = c;
}
}
// kernel functions for PBDObj class
void __global__ AssignColorsGPU(
int2* topolPrimList,
int* p2pIndices,
int2* p2pStartNumList,
int* color,
int* prdColor,
int* detectConflict,
int worksetLength)
{
int primId = blockIdx.x * blockDim.x + threadIdx.x;
// if(primId < 5) printf("%d: entering kernel1\n", primId);
if (primId >= worksetLength)
return;
if (primId == 0)
*detectConflict = 0;
if (color[primId] >= 0)
return;
int nidx = p2pStartNumList[primId].x;
int nlast = p2pStartNumList[primId].x + p2pStartNumList[primId].y;
int c = -1, offset = 0;
while (c < 0)
{
// Bit flag to store seen neighbor colors.
unsigned long forbidden = 0;
for (int i = nidx; i < nlast; ++i)
{
int n = p2pIndices[i];
int nc = color[n] - offset;
if (nc >= 0 && nc < FORBIDBITS)
forbidden |= (1ul << nc);
}
// Check if there's an open color in the current bitrange.
if (forbidden ^ MAXFORBID)
{
unsigned long x = forbidden;
c = offset;
// Find position of first zero bit.
x = (~x) & (x + 1);
// Color is log2(x)
while (x > 1)
{
x /= 2;
c++;
}
}
else
{
// Otherwise we need to try again with the next range of colors.
offset += FORBIDBITS;
}
}
// Record speculative coloring.
prdColor[primId] = c;
// if (primId < 5) printf("\tAssignColorsGPU-%d: prdColor %d\n", primId, prdColor[primId]);
}
void __global__ ResolveConflictsGPU(
int2* topolPrimList,
int* p2pIndices,
int2* p2pStartNumList,
int* color,
int* prdColor,
int* detectConflict,
int worksetLength)
{
int primId = blockIdx.x * blockDim.x + threadIdx.x;
// if(primId < 5) printf("\tResolveConflictsGPU-%d: prdColor %d\n", primId, prdColor[primId]);
if (primId >= worksetLength)
return;
if (color[primId] >= 0)
return;
int nidx = p2pStartNumList[primId].x;
int nlast = p2pStartNumList[primId].x + p2pStartNumList[primId].y;
int c = prdColor[primId];
int conflict = 0;
int npt = topolPrimList[primId].y;
for (int i = nidx; i < nlast; ++i)
{
int n = p2pIndices[i];
int pc = prdColor[n];
// Check for conflict.
if (pc == c)
{
int nnpt = topolPrimList[n].y;
// Resolution gives preference to primitives with more points,
// otherwise the one that comes first.
// (NOTE: We color in fewer iterations if we prioritize by number
// of graph neighbors, but then assuming we process prims by color,
// we're usually farther away from original point order leading to many
// cache misses and slower downstream processing.)
if (nnpt > npt ||
(nnpt == npt && n < primId))
{
conflict = 1;
break;
}
}
}
// If there's a conflict then reset sizes for more work,
// otherwise accept speculative color.
if (conflict)
*detectConflict = worksetLength;
else
color[primId] = c;
}
void ConstraintPBD::EdgeColoring(int iterations)
{
switch (ht)
{
case CPU:
EdgeColoringCPU(iterations);
break;
case GPU:
EdgeColoringGPU(iterations);
break;
default:
break;
}
}
void ConstraintPBD::EdgeColoringCPU(int iterations)
{
for (int i = 0; i < iterations; ++i)
{
/*printf("Before iteration %d color: ", i);
for (int i = 0; i < constrPBDBuffer.color.GetSize(); ++i)
{
printf("%d // ", constrPBDBuffer.color.m_Data[i]);
}
printf("\n");
printf("iteration %d prd color: ", i);
for (int i = 0; i < constrPBDBuffer.color.GetSize(); ++i)
{
printf("%d // ", constrPBDBuffer.color.m_Data[i]);
}
printf("\n");*/
AssignColorsCPU();
ResolveConflictsCPU();
/*printf("After iteration %d color: ", i);
for (int i = 0; i < constrPBDBuffer.color.GetSize(); ++i)
{
printf("%d // ", constrPBDBuffer.color.m_Data[i]);
}
printf("\n");
printf("iteration %d prd color: ", i);
for (int i = 0; i < constrPBDBuffer.color.GetSize(); ++i)
{
printf("%d // ", constrPBDBuffer.color.m_Data[i]);
}
printf("\n");
if (detectConflict == 0)
break;
string fileName = "D:/colorEdge/color." + to_string(i) + ".cache";
IO::SaveBuffer(constrPBDBuffer.prdColor, fileName);
cout << "color saved" << endl;
printf("\n");*/
}
}
void ConstraintPBD::EdgeColoringGPU(int iterations)
{
auto primList = &(topol.primList);
int primNum = primList->GetSize();
auto p2pIndices = &(Prim2PrimsMap.indices);
auto p2pStartNumList = &(Prim2PrimsMap.startNumList);
uint2 blockSize = primList->EvalBlockSize(512);
//printf("Edge Color GPU: block dim = %d, thread dim = %d\n", blockSize.x, blockSize.y);
//// Host To Device: load buffers for Edge Coloring
//primList->MallocAndLoadToDevice(); // topolPrimList
//p2pIndices->MallocAndLoadToDevice(); // p2pIndices
//p2pStartNumList->MallocAndLoadToDevice(); // p2pStartNumList
//color->MallocAndLoadToDevice(); // color
//prdColor->MallocAndLoadToDevice(); // prdColor
//
//hipMalloc((void **)&dFlag, sizeof(int));
//hipError_t cudaStatus = hipMemcpy(dFlag, &detectConflict, sizeof(int), hipMemcpyHostToDevice);
//if(cudaStatus == hipSuccess) printf("\nMalloc and loads succeeded!\n");
// edge coloring SPMD
for (int i = 0; i < iterations; ++i)
{
AssignColorsGPU << < blockSize.x, blockSize.y >> > ((int2*)primList->GetDevicePtr(),
(int*)p2pIndices->GetDevicePtr(),
(int2*)p2pStartNumList->GetDevicePtr(),
(int*)color.GetDevicePtr(),
(int*)prdColor.GetDevicePtr(),
dFlag,
primNum);
ResolveConflictsGPU << < blockSize.x, blockSize.y >> > ((int2*)primList->GetDevicePtr(),
(int*)p2pIndices->GetDevicePtr(),
(int2*)p2pStartNumList->GetDevicePtr(),
(int*)color.GetDevicePtr(),
(int*)prdColor.GetDevicePtr(),
dFlag,
primNum);
}
// Device To Host: load buffers back
color.LoadToHost();
/*if (color.LoadToHost())
printf("Edge Color GPU: Load color Back Succeeded!\n");*/
/*printf("\nAfter iteration(color): ");
for (int i = 0; i < constrPBDBuffer.color.GetSize(); ++i)
{
printf("%d // ", constrPBDBuffer.color.m_Data[i]);
}
printf("\n");*/
// Free GPU memory
/*hipFree(primList->GetDevicePtr());
hipFree(p2pIndices->GetDevicePtr());
hipFree(p2pStartNumList->GetDevicePtr());
hipFree(color->GetDevicePtr());
hipFree(prdColor->GetDevicePtr());
hipFree(dFlag);*/
}
void ConstraintPBD::SortEdgesColors()
{
// cout << "--------" << __FUNCTION__ << "--------" << endl;
/*for (int i = 0; i < color->GetSize(); ++i)
{
printf("%d - ", color->m_Data[i]);
}
printf("\n");
for (int i = 0; i < sortedPrimID->GetSize(); ++i)
{
printf("%d - ", sortedPrimID->m_Data[i]);
}
printf("\n");*/
// cout << __FUNCDNAME__ << endl;
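// Sort the constraint ids by color so constraints sharing a color become contiguous; each color can
// then be solved in parallel as a single workset (see EvalWorksets).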
thrust::sort_by_key(color.m_Data.begin(), color.m_Data.end(), sortedPrimId.m_Data.begin());
auto dColor = &(color);
auto dSortedPrimId = &(sortedPrimId);
dColor->LoadToDevice();
dSortedPrimId->LoadToDevice();
/*for (int i = 0; i < color.GetSize(); ++i)
{
printf("color : %d - ", color.m_Data[i]);
}
printf("\n");
for (int i = 0; i < sortedPrimId.GetSize(); ++i)
{
printf("sortedPrimId: %d - ", sortedPrimId.m_Data[i]);
}
printf("\n");*/
/*for (int i = 0; i < sortedColor->GetSize(); ++i)
{
printf("%d - ", sortedColor->m_Data[i]);
}
printf("\n");
printf("\n");
for (int i = 0; i < color->GetSize(); ++i)
{
printf("%d - ", color->m_Data[i]);
}
printf("\n");*/
}
void ConstraintPBD::EvalWorksets()
{
//cout << "--------" << __FUNCTION__ << "--------" << endl;
colorWorksets.m_Data.clear();
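// Walk the color-sorted list and emit one (startIndex, count) range per color.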
int count = 1;
for (int i = 1; i < color.GetSize(); ++i)
{
if (i == color.GetSize() - 1 && color.m_Data[i] == color.m_Data[i - 1])
{
count++;
colorWorksets.m_Data.push_back(make_int2(i - count + 1, count));
}
else if (i == color.GetSize() - 1 && color.m_Data[i] != color.m_Data[i - 1])
{
colorWorksets.m_Data.push_back(make_int2(i - count, count));
colorWorksets.m_Data.push_back(make_int2(i, 1));
}
else if (i != color.GetSize() - 1 && color.m_Data[i] != color.m_Data[i - 1])
{
colorWorksets.m_Data.push_back(make_int2(i - count, count));
count = 1;
}
else
{
count++;
}
}
/*for (int i = 0; i < sortedColor->GetSize(); ++i)
{
printf("%d - ", sortedColor->m_Data[i]);
}*/
/*for (int i = 0; i < colorWorksets.GetSize(); ++i)
{
printf("start: %d, num: %d ", colorWorksets.m_Data[i].x, colorWorksets.m_Data[i].y);
}
printf("\n");*/
}
void ConstraintPBD::Save(std::ofstream& ofs)
{
topol.Save(ofs);
prdPBuffer.SetName("prdPBuffer");
IO::SaveBuffer(prdPBuffer, ofs);
restPosBuffer.SetName("restPosBuffer"); // empty for now; initialized in initPosition, which is not used when reading data from Houdini
IO::SaveBuffer(restPosBuffer, ofs);
restLengthBuffer.SetName("restLengthBuffer");
IO::SaveBuffer(restLengthBuffer, ofs);
stiffnessBuffer.SetName("stiffnessBuffer");
IO::SaveBuffer(stiffnessBuffer, ofs);
constraintType.SetName("constraintType");
IO::SaveBuffer(constraintType, ofs);
}
// ----------------------------------------------------------------------------------------
// PBDObject class
void PBDObject::Init()
{
initMeshTopol();
initConstr();
}
void PBDObject::Init(string topolFileName, string distConstrFileName)
{
/*if ((IO::readTopolFromTxt(topolFileName, this)) && (IO::readDistConstrFromTxt(distConstrFileName, this)))
printf("PBD Object was initialized successfully\n");*/
bool readTopol = IO::ReadTopolFromTxt(topolFileName, this);
bool readConstr = IO::ReadDistConstrFromTxt(distConstrFileName, this);
if (readTopol && readConstr)
printf("PBD Object was initialized successfully\n");
}
void PBDObject::ContinueSimInit(string meshTopolPath, string constrPath, HardwareType hardwareType)
{
ht = hardwareType;
bool readTopol = IO::ReadTopolFromCache(meshTopolPath, this);
bool readConstr = IO::ReadConstraintFromCache(constrPath, this);
if (readTopol && readConstr)
printf("PBD Object was initialized successfully\n");
}
void PBDObject::SetConstrOption(uint ct, float* stiffnessSetting)
{
this->ct = ct;
this->stiffnessSetting = stiffnessSetting;
}
void PBDObject::initMeshTopol()
{
// OpenGL Topology
meshTopol.indices.SetName("Indices");
meshTopol.posBuffer.SetName("P");
meshTopol.primList.SetName("primList");
initPosition(make_float2(0.0, 0.0));
initMassVel();
initMeshTopolIndices();
}
void PBDObject::initMassVel()
{
// init mass
float3 initVel = make_float3(0.0f, 0.0f, 0.0f);
int num = resY * resX;
for (int i = 0; i < num; i++)
{
massBuffer.m_Data.push_back(1.0);
/*if (i == 0 || i == resY - 1)
{
massBuffer.m_Data.push_back(0.0);
}
else
{
massBuffer.m_Data.push_back(1.0);
}*/
velBuffer.m_Data.push_back(initVel);
}
}
void PBDObject::InitGPUBuffers()
{
// printf("init GPU buffers\n");
auto dPrimList = &(constrPBDBuffer.topol.primList);
auto dP2pIndices = &(constrPBDBuffer.Prim2PrimsMap.indices);
auto dP2pStartNumList = &(constrPBDBuffer.Prim2PrimsMap.startNumList);
auto dColor = &(constrPBDBuffer.color);
auto dPrdColor = &(constrPBDBuffer.prdColor);
auto dsortedPrimId = &(constrPBDBuffer.sortedPrimId);
auto dConstrType = &(constrPBDBuffer.constraintType);
auto dVelBuffer = &(velBuffer);
auto dPrdPBuffer = &(constrPBDBuffer.prdPBuffer);
auto dPositionBuffer = &(meshTopol.posBuffer);
auto dMassBuffer = &(massBuffer);
auto dRestLengthBuffer = &(constrPBDBuffer.restLengthBuffer);
auto dStiffnessBuffer = &(constrPBDBuffer.stiffnessBuffer);
auto dRestPosBuffer = &(constrPBDBuffer.restPosBuffer);
auto dIndices = &(constrPBDBuffer.topol.indices);
// Host To Device: load buffers for Edge Coloring
hipMalloc((void**)&constrPBDBuffer.dFlag, sizeof(int));
hipError_t cudaStatus = hipMemcpy(constrPBDBuffer.dFlag, &(constrPBDBuffer.detectConflict), sizeof(int), hipMemcpyHostToDevice);
//if (cudaStatus == hipSuccess) printf("\nMalloc and loads succeeded!\n");
dPrimList->MallocAndLoadToDevice(); // topolPrimList
dP2pIndices->MallocAndLoadToDevice(); // p2pIndices
dP2pStartNumList->MallocAndLoadToDevice(); // p2pStartNumList
dColor->MallocAndLoadToDevice(); // color
dPrdColor->MallocAndLoadToDevice(); // prdColor
dConstrType->MallocAndLoadToDevice(); // constrType
dsortedPrimId->DeviceMalloc();
dVelBuffer->MallocAndLoadToDevice(); // velocity Buffer
dPrdPBuffer->MallocAndLoadToDevice(); // predicted position buffer
dPositionBuffer->MallocAndLoadToDevice(); // point real position buffer
dMassBuffer->MallocAndLoadToDevice();
dRestLengthBuffer->MallocAndLoadToDevice();
dStiffnessBuffer->MallocAndLoadToDevice();
dRestPosBuffer->MallocAndLoadToDevice();
dIndices->MallocAndLoadToDevice();
}
void PBDObject::freeGPUBuffers()
{
auto dPrimList = &(constrPBDBuffer.topol.primList);
auto dP2pIndices = &(constrPBDBuffer.Prim2PrimsMap.indices);
auto dP2pStartNumList = &(constrPBDBuffer.Prim2PrimsMap.startNumList);
auto dColor = &(constrPBDBuffer.color);
auto dPrdColor = &(constrPBDBuffer.prdColor);
auto dSortedPrimId = &(constrPBDBuffer.sortedPrimId);
auto dConstrType = &(constrPBDBuffer.constraintType);
auto dVelBuffer = &(velBuffer);
auto dPrdPBuffer = &(constrPBDBuffer.prdPBuffer);
auto dPositionBuffer = &(meshTopol.posBuffer);
auto dMassBuffer = &(massBuffer);
auto dRestLengthBuffer = &(constrPBDBuffer.restLengthBuffer);
auto dStiffnessBuffer = &(constrPBDBuffer.stiffnessBuffer);
auto dRestPosBuffer = &(constrPBDBuffer.restPosBuffer);
auto dIndices = &(constrPBDBuffer.topol.indices);
dPositionBuffer->LoadToHost();
dVelBuffer->LoadToHost();
hipFree(constrPBDBuffer.dFlag);
hipFree(dPrimList->GetDevicePtr());
hipFree(dP2pIndices->GetDevicePtr());
hipFree(dP2pStartNumList->GetDevicePtr());
hipFree(dColor->GetDevicePtr());
hipFree(dPrdColor->GetDevicePtr());
hipFree(dSortedPrimId->GetDevicePtr());
hipFree(dConstrType->GetDevicePtr());
hipFree(dVelBuffer->GetDevicePtr());
hipFree(dPrdPBuffer->GetDevicePtr());
hipFree(dPositionBuffer->GetDevicePtr());
hipFree(dMassBuffer->GetDevicePtr());
hipFree(dRestLengthBuffer->GetDevicePtr());
hipFree(dStiffnessBuffer->GetDevicePtr());
hipFree(dRestPosBuffer->GetDevicePtr());
hipFree(dIndices->GetDevicePtr());
}
// init : allocation
// setvalue
void PBDObject::initConstr()
{
constrPBDBuffer.ht = ht;
constrPBDBuffer.topol.posBuffer = meshTopol.posBuffer;
constrPBDBuffer.prdPBuffer = meshTopol.posBuffer;
constrPBDBuffer.topol.primList.SetName("primList");
constrPBDBuffer.topol.indices.SetName("Indices");
constrPBDBuffer.color.SetName("color");
constrPBDBuffer.prdColor.SetName("prdcolor");
constrPBDBuffer.sortedPrimId.SetName("sortedPrimId");
if ((DISTANCE & ct) == DISTANCE)
{
constrPBDBuffer.InitDistanceConstr(meshTopol.posBuffer, stiffnessSetting[0], resY, resX);
}
if ((BENDING & ct) == BENDING)
{
constrPBDBuffer.InitBendingConstr();
}
if ((ANCHOR & ct) == ANCHOR)
{
constrPBDBuffer.InitAnchorConstr(meshTopol.posBuffer, -1.0f, resY);
}
constrPBDBuffer.GenePoint2PrimsMap(constrPBDBuffer.topol);
constrPBDBuffer.GenePrim2PrimsMap(constrPBDBuffer.topol);
if (ht == GPU)
{
InitGPUBuffers();
constrPBDBuffer.EdgeColoring(20000);
}
}
void PBDObject::initPosition(float2 cord)
{
auto positionBuffer = &(meshTopol.posBuffer);
float lengthInterval = sizeX / (resX - 1);
float heightInterval = sizeY / (resY - 1);
int num = resY * resX;
int index = 0;
for (int i = 0; i < resY; i++)
{
for (int j = 0; j < resX; j++)
{
float3 p;
p.x = cord.x + j * lengthInterval;
p.y = 0;
p.z = cord.y + i * heightInterval;
positionBuffer->m_Data.push_back(p);
index++;
}
}
constrPBDBuffer.restPosBuffer.m_Data.push_back(positionBuffer->m_Data[0]);
constrPBDBuffer.restPosBuffer.m_Data.push_back(positionBuffer->m_Data[resX - 1]);
}
void PBDObject::initMeshTopolIndices()
{
auto meshTopolIndicies = &(meshTopol.indices);
int num = resY * resX;
for (int i = 0; i < num - resX; i++)
{
if (i % resX == resX - 1)
continue;
meshTopolIndicies->m_Data.push_back(i);
meshTopolIndicies->m_Data.push_back(i + resX);
meshTopolIndicies->m_Data.push_back(i + resX + 1);
meshTopolIndicies->m_Data.push_back(i);
meshTopolIndicies->m_Data.push_back(i + resX + 1);
meshTopolIndicies->m_Data.push_back(i + 1);
}
}
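// Illustrative note (not from the original source): every interior grid cell is split into
// two triangles; for a point i not on the last row/column the quad {i, i+1, i+resX, i+resX+1}
// yields triangles (i, i+resX, i+resX+1) and (i, i+resX+1, i+1). With resX = 3 the cell at
// i = 0 produces (0, 3, 4) and (0, 4, 1).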
void PBDObject::groundTruthTest()
{
vector<int> arr0 = { 0, 1, 3, 3, 2, 0 };
vector<int> arr1 = { 0,3,0,1,0,2,1,3,2,3 };
constrPBDBuffer.topol.indices.m_Data = arr1;
vector<int2> arr2 = { make_int2(0,2), make_int2(2,2), make_int2(4,2), make_int2(6,2), make_int2(8,2) };
constrPBDBuffer.topol.primList.m_Data = arr2;
vector<int> arr3 = { -1,-1,-1,-1,-1 };
constrPBDBuffer.color.m_Data = arr3;
constrPBDBuffer.prdColor.m_Data = arr3;
}
void PBDObject::Save(string path)
{
PBD_DEBUG;
std::ofstream ofs(path);
if (!ofs.is_open())
return;
ofs << "Header|float3Buffer,6;floatBuffer,2;int2Buffer,2;intBuffer,3;float3,1;float,1" << endl; //HEADER
meshTopol.indices.SetName("meshTopol indices");
meshTopol.posBuffer.SetName("meshTopol posBuffer");
meshTopol.primList.SetName("meshTopol primList");
meshTopol.Save(ofs);
constrPBDBuffer.Save(ofs);
velBuffer.SetName("velBuffer");
IO::SaveBuffer(velBuffer, ofs);
massBuffer.SetName("massBuffer");
IO::SaveBuffer(massBuffer, ofs);
IO::SaveData(dampingRate, "dampingRate", ofs);
IO::SaveData(gravity, "gravity", ofs);
ofs.flush();
ofs.close();
}
void PBDObject::SaveMeshTopol(string path)
{
PBD_DEBUG;
std::ofstream ofs(path);
if (!ofs.is_open())
return;
//ofs << "Header|float3Buffer,3;int2Buffer,1;intBuffer,1;float3,1;float,1" << endl; //HEADER
meshTopol.Save(ofs);
velBuffer.SetName("velBuffer");
IO::SaveBuffer(velBuffer, ofs);
massBuffer.SetName("massBuffer");
IO::SaveBuffer(massBuffer, ofs);
IO::SaveData(dampingRate, "dampingRate", ofs);
IO::SaveData(gravity, "gravity", ofs);
ofs.flush();
ofs.close();
}
void PBDObject::SaveConstraint(string path)
{
PBD_DEBUG;
std::ofstream ofs(path);
if (!ofs.is_open())
return;
//ofs << "Header|float3Buffer,3;floatBuffer,2;int2Buffer,1;intBuffer,2" << endl; //HEADER
constrPBDBuffer.Save(ofs);
ofs.flush();
ofs.close();
}
void PBDObject::Read(string path)
{
}
// ----------------------------------------------------------------------------------------
// SolverPBD class
// kernel functions for SolverPBD class
void __global__ AdvectGPUKernel(
int pointNum,
float dt,
float dampingRate,
float3 gravity,
float* mass,
float3* velBuffer,
float3* prdPBuffer,
float3* positionBuffer)
{
int pointId = blockIdx.x * blockDim.x + threadIdx.x;
// if(primId < 5) printf("\tResolveConflictsGPU-%d: prdColor %d\n", primId, prdColor[primId]);
if (pointId >= pointNum)
return;
/*if (i == 30)
printf("old velocity Buffer: %f, %f, %f \n", velBuffer->m_Data[i].x, velBuffer->m_Data[i].y, velBuffer->m_Data[i].z);*/
velBuffer[pointId] += gravity * dt * mass[pointId];
/*if(i == 30)
printf("new velocity Buffer: %f, %f, %f \n", velBuffer->m_Data[i].x, velBuffer->m_Data[i].y, velBuffer->m_Data[i].z);*/
velBuffer[pointId] *= powf(dampingRate, dt);
prdPBuffer[pointId] = positionBuffer[pointId] + velBuffer[pointId] * dt;
//printf("postion Buffer: %f, %f, %f \n", prdPBuffer.m_Data[j].x, prdPBuffer.m_Data[j].y, prdPBuffer.m_Data[j].z);
/*printf("Advect: prdPBuffer: \n");
printf("(%f,%f,%f)", prdPBuffer[pointId].x, prdPBuffer[pointId].y, prdPBuffer[pointId].z);*/
}
void SolverPBD::Advect(float dt)
{
PBD_DEBUG;
switch (m_ht) // TODO: change back to ht
{
case CPU:
advectCPU(dt);
break;
case GPU:
advectGPU(dt);
break;
default:
break;
}
}
void SolverPBD::advectCPU(float dt)
{
auto velBuffer = &(m_pbdObj->velBuffer);
auto prdPBuffer = &(m_pbdObj->constrPBDBuffer.prdPBuffer);
auto positionBuffer = &(m_pbdObj->meshTopol.posBuffer);
for (int i = 0; i < velBuffer->GetSize(); i++)
{
velBuffer->m_Data[i] += m_pbdObj->gravity * dt;
prdPBuffer->m_Data[i] = positionBuffer->m_Data[i] + velBuffer->m_Data[i] * dt;
}
}
void SolverPBD::advectGPU(float dt)
{
auto velBuffer = &(m_pbdObj->velBuffer);
auto prdPBuffer = &(m_pbdObj->constrPBDBuffer.prdPBuffer);
auto positionBuffer = &(m_pbdObj->meshTopol.posBuffer);
auto massBuffer = &(m_pbdObj->massBuffer);
//printf("Before Advect GPU:");
//printf("point 0: %f, %f, %f; point col-1: %f, %f, %f\n",
// prdPBuffer->m_Data[0].x, prdPBuffer->m_Data[0].y, prdPBuffer->m_Data[0].z,
// prdPBuffer->m_Data[pbdObj->resY-1].x, prdPBuffer->m_Data[pbdObj->resY - 1].y,
// prdPBuffer->m_Data[pbdObj->resY - 1].z );
int pointNum = prdPBuffer->GetSize();
float dampingRate = m_pbdObj->dampingRate;
float3 gravity = m_pbdObj->gravity;
uint2 blockSize = positionBuffer->EvalBlockSize(512);
//printf("Advect GPU: block dim = %d, thread dim = %d\n", blockSize.x, blockSize.y);
// Host To Device: load buffers for Advect
// edge coloring SPMD
AdvectGPUKernel << < blockSize.x, blockSize.y >> > (pointNum,
dt,
dampingRate,
gravity,
(float*)massBuffer->GetDevicePtr(),
(float3*)velBuffer->GetDevicePtr(),
(float3*)prdPBuffer->GetDevicePtr(),
(float3*)positionBuffer->GetDevicePtr());
// Device To Host: load buffers back
//if (prdPBuffer->LoadToHost())
// printf("Advect GPU: Load prdPBuffer Back Succeeded!\n");
//if (velBuffer->LoadToHost())
// printf("Advect GPU: Load velBuffer Back Succeeded!\n");
//printf("After Advect GPU:");
//printf("point 0: %f, %f, %f; point col-1: %f, %f, %f\n",
// prdPBuffer->m_Data[0].x, prdPBuffer->m_Data[0].y, prdPBuffer->m_Data[0].z,
// prdPBuffer->m_Data[pbdObj->resY - 1].x, prdPBuffer->m_Data[pbdObj->resY - 1].y,
// prdPBuffer->m_Data[pbdObj->resY - 1].z);
}
void SolverPBD::ProjectConstraint(SolverType st, int iterations)
{
m_pbdSolverTimer->Tick();
switch (m_ht)
{
case CPU:
projectConstraintCPU(st, iterations);
break;
case GPU:
projectConstraintGPU(st, iterations);
break;
default:
break;
}
m_pbdSolverTimer->Tock();
PBD_DEBUGTIME(m_pbdSolverTimer->GetFuncTime());
}
void SolverPBD::ProjectConstraintWithColli(SolverType st, int iterations, CollisionSolver* colliSolver,
BufferVector3f& fixedBuffer, BufferVector3f& vFixedBuffer, BufferVector3f& fFixedBuffer, int debug)
{
m_pbdSolverTimer->Tick();
switch (m_ht)
{
case CPU:
projectConstraintWithColliCPU(st, iterations, colliSolver, fixedBuffer, vFixedBuffer, fFixedBuffer, debug);
break;
case GPU:
projectConstraintGPU(st, iterations);
break;
default:
break;
}
m_pbdSolverTimer->Tock();
}
void SolverPBD::projectConstraintCPU(SolverType st, int iterations)
{
auto primList = &(m_pbdObj->constrPBDBuffer.topol.primList);
auto prdPBuffer = &(m_pbdObj->constrPBDBuffer.prdPBuffer);
auto massBuffer = &(m_pbdObj->massBuffer);
auto restLengthBuffer = &(m_pbdObj->constrPBDBuffer.restLengthBuffer);
auto stiffnessBuffer = &(m_pbdObj->constrPBDBuffer.stiffnessBuffer);
// auto restPosBuffer = &(m_pbdObj->constrPBDBuffer.restPosBuffer);
auto indices = &(m_pbdObj->constrPBDBuffer.topol.indices);
for (size_t ii = 0; ii < iterations; ii++)
{
for (size_t i = 0; i < primList->GetSize(); i++)
{
if (primList->m_Data[i].y != 2)
continue;
int i0 = indices->m_Data[primList->m_Data[i].x];
int i1 = indices->m_Data[primList->m_Data[i].x + 1];
float3 dp1;
float3 dp2;
float d = Distance(prdPBuffer->m_Data[i0], prdPBuffer->m_Data[i1]);
float3 r = prdPBuffer->m_Data[i0] - prdPBuffer->m_Data[i1];
r = normalize(r);
dp1.x = -massBuffer->m_Data[i0] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.x;
dp1.y = -massBuffer->m_Data[i0] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.y;
dp1.z = -massBuffer->m_Data[i0] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.z;
dp2.x = massBuffer->m_Data[i1] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.x;
dp2.y = massBuffer->m_Data[i1] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.y;
dp2.z = massBuffer->m_Data[i1] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.z;
float k = 1;// -powf(1 - stiffnessBuffer->m_Data[i], 1.0 / (ii + 1));
dp1 *= k;
dp2 *= k;
prdPBuffer->m_Data[i0] += dp1;
prdPBuffer->m_Data[i1] += dp2;
}
ColliWithShpGrd();
}
}
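// Illustrative note (not from the original source): the loop above is the classic PBD
// distance constraint C(p0, p1) = |p0 - p1| - d0. With weights w0, w1 read from massBuffer
// (the textbook formulation uses inverse masses; with all masses set to 1.0 above the two
// coincide) the corrections are
//   dp0 = -w0 / (w0 + w1) * (d - d0) * n,   dp1 = +w1 / (w0 + w1) * (d - d0) * n,
// where n = (p0 - p1) / d. Worked example: p0 = (0,0,0), p1 = (2,0,0), d0 = 1, w0 = w1 = 1
// gives d = 2, n = (-1,0,0), dp0 = (0.5,0,0), dp1 = (-0.5,0,0), restoring the rest length.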
void SolverPBD::projectConstraintWithColliCPU(SolverType st, int iterations, CollisionSolver* colliSolver,
BufferVector3f& fixedBuffer, BufferVector3f& vFixedBuffer, BufferVector3f& fFixedBuffer, int debug)
{
PBD_DEBUG;
int debugFrameId = 1;
auto primList = &(m_pbdObj->constrPBDBuffer.topol.primList);
auto prdPBuffer = &(m_pbdObj->constrPBDBuffer.prdPBuffer);
auto massBuffer = &(m_pbdObj->massBuffer);
auto restLengthBuffer = &(m_pbdObj->constrPBDBuffer.restLengthBuffer);
auto stiffnessBuffer = &(m_pbdObj->constrPBDBuffer.stiffnessBuffer);
// auto restPosBuffer = &(m_pbdObj->constrPBDBuffer.restPosBuffer);
auto indices = &(m_pbdObj->constrPBDBuffer.topol.indices);
colliSolver->afterProjPrdpBuffer = m_pbdObj->constrPBDBuffer.prdPBuffer;
for (size_t ii = 0; ii < iterations; ii++)
{
if ((ii % 10 == 0) || (ii == iterations - 1)) // 0 10 20 30 || == -1
{
colliSolver->CCD_SH_Extended();
//colliSolver->CCD_SH();
printf("contact size: %d\n", colliSolver->contactData.ctxs.GetSize());
}
//printInfo("--- in project", prdPBuffer->m_Data[1]);
//for (int i = 0; i <= 3; ++i)
//{
for (size_t i = 0; i < primList->GetSize(); i++)
{
if (primList->m_Data[i].y != 2)
continue;
int i0 = indices->m_Data[primList->m_Data[i].x];
int i1 = indices->m_Data[primList->m_Data[i].x + 1];
float3 dp1;
float3 dp2;
float d = Distance(prdPBuffer->m_Data[i0], prdPBuffer->m_Data[i1]);
float3 r = prdPBuffer->m_Data[i0] - prdPBuffer->m_Data[i1];
r = normalize(r);
dp1.x = -massBuffer->m_Data[i0] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.x;
dp1.y = -massBuffer->m_Data[i0] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.y;
dp1.z = -massBuffer->m_Data[i0] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.z;
dp2.x = massBuffer->m_Data[i1] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.x;
dp2.y = massBuffer->m_Data[i1] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.y;
dp2.z = massBuffer->m_Data[i1] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.z;
float k = 1;// -powf(1 - stiffnessBuffer->m_Data[i], 1.0 / (ii + 1));
dp1 *= k;
dp2 *= k;
prdPBuffer->m_Data[i0] += dp1;
prdPBuffer->m_Data[i1] += dp2;
}
//}
//colliSolver->CollisionResolve();
/*string beforeResolvePath = "D://0326Test//testData//testBeforeResolve." + to_string(debug * iterations + ii) + ".cache";
m_pbdObj->constrPBDBuffer.prdPBuffer.SetName("P");
Topology tempBeforeResolve;
tempBeforeResolve.indices = m_pbdObj->meshTopol.indices;
tempBeforeResolve.primList = m_pbdObj->meshTopol.primList;
tempBeforeResolve.posBuffer = m_pbdObj->constrPBDBuffer.prdPBuffer;
tempBeforeResolve.indices.SetName("Indices");
tempBeforeResolve.primList.SetName("primList");
tempBeforeResolve.posBuffer.SetName("P");
IO::SaveToplogy(tempBeforeResolve, beforeResolvePath);*/
//for (int i = 0; i <= 2; ++i)
//{
colliSolver->CollisionResolveNew(fixedBuffer, vFixedBuffer, fFixedBuffer, (debug * iterations + ii), ii, debugFrameId);
//}
/*string path = "D://0326Test//testData//test." + to_string(debug*iterations + ii) + ".cache";
m_pbdObj->constrPBDBuffer.prdPBuffer.SetName("P");
Topology temp;
temp.indices = m_pbdObj->meshTopol.indices;
temp.primList = m_pbdObj->meshTopol.primList;
temp.posBuffer = m_pbdObj->constrPBDBuffer.prdPBuffer;
temp.indices.SetName("Indices");
temp.primList.SetName("primList");
temp.posBuffer.SetName("P");
IO::SaveToplogy(temp, path);*/
//printInfo("--- after resolve", prdPBuffer->m_Data[1]);
//printf("--------------------itreation %d-------------------\n", ii);
ColliWithShpGrd();
}
}
// Attach Points
//for (size_t j = 0; j < prdPBuffer->GetSize(); j++)
//{
// //attach points
// if (j == 0)
// {
// prdPBuffer->m_Data[j] = restPosBuffer->m_Data[0];
// }
// if (j == m_pbdObj->resY - 1)
// {
// prdPBuffer->m_Data[j] = restPosBuffer->m_Data[1];
// }
// ////point collide with sphere
// //bool isCollideSphere = ColliderSphere(prdPBuffer.m_Data[j], sphereOrigin, sphereRadius, j);
// //if (isCollideSphere) //move the point to the point which intersect with sphere
// //{
// // float3 moveVector = GenerateMoveVectorSphere(sphereOrigin, sphereRadius, prdPBuffer.m_Data[j], j);
// // prdPBuffer.m_Data[j] += moveVector;
// //}
// ////point collide with ground
// //bool isCollideGoround = CollideGround(prdPBuffer.m_Data[j], groundCenter);
// //if (isCollideGoround)
// //{
// // prdPBuffer.m_Data[j].y = groundCenter.y;
// //}
//}
void __global__ ProjectContraintsGPUKernel(
int resY,
int start,
int num,
int iteration,
int* sortedPrimId,
int* indices,
int* constraintType,
float* massBuffer,
float* restLengthBuffer,
float* stiffnessBuffer,
int2* primList,
float3* prdPBuffer,
float3* restBuffer)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num)
return;
//printf("idx: %d ---- ", idx + start);
auto primId = sortedPrimId[idx + start];
//printf("idx + start: %d, primId: %d \n", idx + start, primId);
if (constraintType[primId] == DISTANCE)
{
int i0 = indices[primList[primId].x];
int i1 = indices[primList[primId].x + 1];
float3 p0 = prdPBuffer[i0];
float3 p1 = prdPBuffer[i1];
/*printf("Project Constraint: prdPBuffer: ");
printf("i0: (%f,%f,%f)", prdPBuffer[i0].x, prdPBuffer[i0].y, prdPBuffer[i0].z);
printf("i1: (%f,%f,%f)\n", prdPBuffer[i1].x, prdPBuffer[i1].y, prdPBuffer[i1].z);*/
float3 dp0;
float3 dp1;
float d = Distance(p0, p1);
float3 v;
v = p0 - p1;
//printf("mass: %f", massBuffer[i0]);
dp0 = -massBuffer[i0] / (massBuffer[i0] + massBuffer[i1]) * (d - restLengthBuffer[primId]) * v / d;
dp1 = massBuffer[i1] / (massBuffer[i0] + massBuffer[i1]) * (d - restLengthBuffer[primId]) * v / d;
float k = 1 - powf(1 - stiffnessBuffer[primId], 1.0 / (iteration + 1));
/*printf("dp0: (%f,%f,%f) ; ", dp0.x, dp0.y, dp0.z);
printf("dp1: (%f,%f,%f)", dp1.x, dp1.y, dp1.z);
printf("d: %f, k: %f \n",d, k);*/
dp0 *= k;
dp1 *= k;
prdPBuffer[i0] += dp0;
prdPBuffer[i1] += dp1;
/*printf("Project Constraint: prdPBuffer: ");
printf("i0: (%f,%f,%f)", prdPBuffer[i0].x, prdPBuffer[i0].y, prdPBuffer[i0].z);
printf("i1: (%f,%f,%f)\n", prdPBuffer[i1].x, prdPBuffer[i1].y, prdPBuffer[i1].z);*/
}
if (constraintType[primId] == ANCHOR)
{
int i = indices[primList[primId].x];
if (i == 0)
prdPBuffer[i] = restBuffer[0];
if (i == (resY - 1))
prdPBuffer[i] = restBuffer[1];
}
}
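// Illustrative note (not from the original source): the factor k above is a variant of the
// PBD stiffness correction k' = 1 - (1 - k_stiff)^(1/n), which keeps the effective stiffness
// roughly independent of how many solver iterations are run; here n is taken as the current
// iteration index + 1. Example with k_stiff = 0.5:
//   iteration 0: k = 1 - 0.5^(1/1) = 0.5
//   iteration 1: k = 1 - 0.5^(1/2) ~ 0.293
//   iteration 3: k = 1 - 0.5^(1/4) ~ 0.159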
void SolverPBD::projectConstraintGPU(SolverType st, int iterations)
{
m_pbdObj->constrPBDBuffer.SortEdgesColors();
m_pbdObj->constrPBDBuffer.EvalWorksets();
// cout << "--------" << __FUNCTION__ << "--------" << endl;
auto worksets = &(m_pbdObj->constrPBDBuffer.colorWorksets);
auto primList = &(m_pbdObj->constrPBDBuffer.topol.primList);
auto sortedPrimId = &(m_pbdObj->constrPBDBuffer.sortedPrimId);
auto prdPBuffer = &(m_pbdObj->constrPBDBuffer.prdPBuffer);
auto massBuffer = &(m_pbdObj->massBuffer);
auto restLengthBuffer = &(m_pbdObj->constrPBDBuffer.restLengthBuffer);
auto stiffnessBuffer = &(m_pbdObj->constrPBDBuffer.stiffnessBuffer);
auto restPosBuffer = &(m_pbdObj->constrPBDBuffer.restPosBuffer);
auto indices = &(m_pbdObj->constrPBDBuffer.topol.indices);
auto constraintType = &(m_pbdObj->constrPBDBuffer.constraintType);
for (int i = 0; i < iterations; ++i)
{
for (auto workset : worksets->m_Data)
{
int start = workset.x;
int num = workset.y;
int numBlock = 1;
int numThread = 512;
if (num > numThread && num < 1024)
numThread = num;
else if (num >= 1024)
numBlock = (num + numThread - 1) / numThread; // ceil division; integer num / 512 would truncate and drop the last partial block
//printf("----------------------------\n");
//printf(" numBlock: %d numThread: %d\n", numBlock, numThread);
/*printf("indices: ");
for (int i = 0; i < indices->GetSize(); ++i)
{
cout << indices->m_Data[i] << "-";
}
printf("\n");
printf("constraintType: ");
for (int i = 0; i < constraintType->GetSize(); ++i)
{
cout << constraintType->m_Data[i] << "-";
}
printf("\n");
printf("massBuffer: ");
for (int i = 0; i < massBuffer->GetSize(); ++i)
{
cout << massBuffer->m_Data[i] << "-";
}
printf("\n");
printf("restLengthBuffer: ");
for (int i = 0; i < restLengthBuffer->GetSize(); ++i)
{
cout << restLengthBuffer->m_Data[i] << "-";
}
printf("\n");
printf("stiffnessBuffer: ");
for (int i = 0; i < stiffnessBuffer->GetSize(); ++i)
{
cout << stiffnessBuffer->m_Data[i] << "-";
}*/
/*printf("prdPBuffer: \n");
for (int i = 0; i < prdPBuffer->GetSize(); ++i)
{
cout << "(" << prdPBuffer->m_Data[i].x << "," << prdPBuffer->m_Data[i].y << "," << prdPBuffer->m_Data[i].z << ")" << endl;
}
printf("\n");
printf("primList: \n");
for (int i = 0; i < primList->GetSize(); ++i)
{
cout << "(" << primList->m_Data[i].x << "," << primList->m_Data[i].y << ")" << endl;
}*/
ProjectContraintsGPUKernel << <numBlock, numThread >> > (
m_pbdObj->resY,
start,
num,
i,
((int*)sortedPrimId->GetDevicePtr()),
(int*)indices->GetDevicePtr(),
(int*)constraintType->GetDevicePtr(),
(float*)massBuffer->GetDevicePtr(),
(float*)restLengthBuffer->GetDevicePtr(),
(float*)stiffnessBuffer->GetDevicePtr(),
(int2*)primList->GetDevicePtr(),
(float3*)prdPBuffer->GetDevicePtr(),
(float3*)restPosBuffer->GetDevicePtr());
/*hipError_t cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
}*/
}
}
//{
// // TODO: edge coloring
// /*
// int* color
// int* sortedColor
// int* sortedPrim
// int main
// {
// sort(color, sortedColor, sortedPrim)
// //color: 11220001110222
// //sortedColor:000111222
// //sortedPrim:
// int2 workSets = Eval(SortedColor)
// //workSets.x start
// //workSets.y num
// for(auto workset: workSets)
// {
// int start = workSets.x
// int num = workSets.y
// kernel project<<<numBlock, numThread>>>(num, sortedPrim+start, prdPbuffer, restlength)
// {
// if(index > = num)
// return;
// int2 prim = sortedPrimId[index]
// int i0 = primList[prim.x]
// int i1 = primList[prim.x + 1]
// float3 p0 = prdPbuffer[i0]
// float3 p1 = prdPbuffer[i1]
// }
// }
// }
//}
}
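// Illustrative note (not from the original source): the per-workset launch size is just a
// ceil division of the workset size by the block size, e.g.
//
//   static inline int EvalNumBlocks(int n, int threadsPerBlock) // hypothetical helper
//   {
//       return (n + threadsPerBlock - 1) / threadsPerBlock;     // ceil(n / threadsPerBlock)
//   }
//
// With 512 threads per block a workset of 1300 constraints needs EvalNumBlocks(1300, 512) = 3
// blocks; the "idx >= num" guard in the kernel discards the surplus threads of the last block.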
void SolverPBD::Integration(float dt)
{
PBD_DEBUG;
switch (m_ht) // TODO: change back to ht
{
case CPU:
integrationCPU(dt);
break;
case GPU:
integrationGPU(dt);
break;
default:
break;
}
}
void SolverPBD::integrationCPU(float dt)
{
auto positionBuffer = &(m_pbdObj->meshTopol.posBuffer);
auto velBuffer = &(m_pbdObj->velBuffer);
auto prdPBuffer = &(m_pbdObj->constrPBDBuffer.prdPBuffer);
for (size_t i = 0; i < positionBuffer->GetSize(); i++)
{
velBuffer->m_Data[i] = (prdPBuffer->m_Data[i] - positionBuffer->m_Data[i]) / dt;
positionBuffer->m_Data[i] = prdPBuffer->m_Data[i];
}
}
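// Illustrative note (not from the original source): this is the standard PBD end-of-step
// update: velocity is recovered from the positional change, v = (x_prd - x) / dt, and the
// predicted position is committed. E.g. a point that moved from y = 1.0 to y = 0.9 during a
// 0.1 s step ends the frame with velocity -1.0 along y.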
void __global__ IntegrationGPUKernel(
int pointNum,
float dt,
float3* velBuffer,
float3* prdPBuffer,
float3* positionBuffer)
{
int pointId = blockIdx.x * blockDim.x + threadIdx.x;
// if(primId < 5) printf("\tResolveConflictsGPU-%d: prdColor %d\n", primId, prdColor[primId]);
if (pointId >= pointNum)
return;
velBuffer[pointId] = (prdPBuffer[pointId] - positionBuffer[pointId]) / dt;
positionBuffer[pointId] = prdPBuffer[pointId];
}
// TODO
void SolverPBD::integrationGPU(float dt)
{
auto positionBuffer = &(m_pbdObj->meshTopol.posBuffer);
auto velBuffer = &(m_pbdObj->velBuffer);
auto prdPBuffer = &(m_pbdObj->constrPBDBuffer.prdPBuffer);
/*printf("Before Integration GPU:");
printf("point 0: %f, %f, %f; point col-1: %f, %f, %f\n",
prdPBuffer->m_Data[0].x, prdPBuffer->m_Data[0].y, prdPBuffer->m_Data[0].z,
prdPBuffer->m_Data[pbdObj->resY - 1].x, prdPBuffer->m_Data[pbdObj->resY - 1].y,
prdPBuffer->m_Data[pbdObj->resY - 1].z);*/
int pointNum = prdPBuffer->GetSize();
uint2 blockSize = positionBuffer->EvalBlockSize(512);
//printf("Integration GPU: block dim = %d, thread dim = %d\n", blockSize.x, blockSize.y);
IntegrationGPUKernel << < blockSize.x, blockSize.y >> > (pointNum,
dt,
(float3*)velBuffer->GetDevicePtr(),
(float3*)prdPBuffer->GetDevicePtr(),
(float3*)positionBuffer->GetDevicePtr());
// Device To Host: load buffers back
/*if (positionBuffer->LoadToHost())
printf("Integration GPU: Load positionBuffer Back Succeeded!\n");
if (velBuffer->LoadToHost())
printf("Integration GPU: Load velBuffer Back Succeeded!\n");*/
//printf("After Integration GPU:");
//printf("point 0: %f, %f, %f; point col-1: %f, %f, %f\n",
// positionBuffer->m_Data[0].x, positionBuffer->m_Data[0].y, positionBuffer->m_Data[0].z,
// positionBuffer->m_Data[pbdObj->resY - 1].x, positionBuffer->m_Data[pbdObj->resY - 1].y,
// positionBuffer->m_Data[pbdObj->resY - 1].z);
}
void SolverPBD::ColliWithShpGrd()
{
auto posBuffer = &(m_pbdObj->meshTopol.posBuffer);
auto prdPBuffer = &(m_pbdObj->constrPBDBuffer.prdPBuffer);
for (int vtxId = 0; vtxId < posBuffer->GetSize(); ++vtxId)
{
//point collide with sphere
bool isCollideSphere = ColliderSphere(prdPBuffer->m_Data[vtxId], m_sphereCenter, m_sphereRadius);
if (isCollideSphere) //move the point to the point which intersect with sphere
{
float3 moveVector = GenerateMoveVectorSphere(m_sphereCenter, m_sphereRadius, prdPBuffer->m_Data[vtxId]);
prdPBuffer->m_Data[vtxId] += moveVector;
}
//point collide with ground
//bool isCollideGoround = CollideGround(prdPBuffer->m_Data[vtxId], m_groundHeight);
//if (isCollideGoround)
//{
// prdPBuffer->m_Data[vtxId].y = m_groundHeight;
//}
}
}
bool SolverPBD::ColliderSphere(float3 pointPos, float3 sphereOrigin, float r)
{
float d = Distance(pointPos, sphereOrigin);
if (d - r > 0.001)
{
return false;
}
else
{
return true;
}
}
bool SolverPBD::CollideGround(float3 pointPos, float groundHeight)
{
if (pointPos.y - groundHeight < 0.001)
{
return true;
}
else
{
return false;
}
}
float3 SolverPBD::GenerateMoveVectorSphere(float3 sphereOrigin, float sphereRadius, float3 p)
{
float moveDistance = sphereRadius - Distance(sphereOrigin, p);
float3 moveDirection = (p - sphereOrigin) / Distance(sphereOrigin, p);
float3 moveLength = moveDirection * moveDistance;
return moveLength;
}
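// Illustrative note (not from the original source): the sphere response pushes a penetrating
// point back onto the surface along the radial direction,
//   move = (p - c) / |p - c| * (r - |p - c|).
// Worked example: c = (0,0,0), r = 1, p = (0.5, 0, 0) gives |p - c| = 0.5, direction (1,0,0),
// move = (0.5, 0, 0), so the corrected point lands exactly on the surface at (1, 0, 0).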
// ------------------Topology---------------------
void Topology::Save(std::ofstream& ofs)
{
IO::SaveBuffer(indices, ofs);
IO::SaveBuffer(posBuffer, ofs);
IO::SaveBuffer(primList, ofs);
}
| ea6fb4df296beca12d46ad8abf78bee011832c50.cu | #include "PBD_Basic.cuh"
#include"CCD_Basic.h"
KERNEL_FUNC float Distance(float3 p1, float3 p2)
{
return powf(powf((p1.x - p2.x), 2) + powf((p1.y - p2.y), 2) + powf((p1.z - p2.z), 2), 0.5);
}
// ----------------------------------------------------------------------------------------
// ConstraintPBD
void ConstraintPBD::InitDistanceConstr(BufferVector3f& meshPosBuffer, float stiffness, int resY, int resX)
{
// cout << __FUNCTION__ << " resolution:" << resY << "----" << resX << std::endl;
InitDistanceIndices(resY, resX);
InitDistanceInfo(meshPosBuffer, stiffness);
}
void ConstraintPBD::InitDistanceIndices(int resY, int resX)
{
int num = resY * resX;
for (int i = 0; i < num - resX; i++)
{
if (i % resX == 0)
{
topol.indices.m_Data.push_back(i);
topol.indices.m_Data.push_back(i + 1);
topol.indices.m_Data.push_back(i);
topol.indices.m_Data.push_back(i + resX);
}
else if (i % resX == resX - 1)
{
topol.indices.m_Data.push_back(i);
topol.indices.m_Data.push_back(i + resX);
topol.indices.m_Data.push_back(i);
topol.indices.m_Data.push_back(i + resX - 1);
}
else
{
topol.indices.m_Data.push_back(i);
topol.indices.m_Data.push_back(i + 1);
topol.indices.m_Data.push_back(i);
topol.indices.m_Data.push_back(i + resX);
topol.indices.m_Data.push_back(i);
topol.indices.m_Data.push_back(i + resX - 1);
}
}
for (int i = num - resX; i < num - 1; ++i)
{
topol.indices.m_Data.push_back(i);
topol.indices.m_Data.push_back(i + 1);
}
}
void ConstraintPBD::InitDistanceInfo(BufferVector3f& meshPosBuffer, float stiffness)
{
//
// constraint allocation
//
int count = 2;
int prims = topol.indices.GetSize() / count;
//cout << __FUNCTION__ <<" prims:"<< prims << std::endl;
for (int i = 0; i < prims; i++)
{
// init primList & sortedPrimID
int2 p;
p.x = i * count;
p.y = count;
topol.primList.m_Data.push_back(p);
sortedPrimId.m_Data.push_back(i); // WARNING: sortedPrimID JUST for now (without collision)
// init stiffness
stiffnessBuffer.m_Data.push_back(stiffness);
// init rest length
int i0 = topol.indices.m_Data[p.x];
int i1 = topol.indices.m_Data[p.x + 1];
float d = Distance(meshPosBuffer.m_Data[i0], meshPosBuffer.m_Data[i1]);
restLengthBuffer.m_Data.push_back(d);
// init contraint type
constraintType.m_Data.push_back(DISTANCE);
// init color
color.m_Data.push_back(-1);
prdColor.m_Data.push_back(-1);
}
/*
for (int a = 0; a < 2; a++)
{
int2 p;
p.x = indices.GetSize();
p.y = 1;
constrPBDBuffer.color.m_Data.push_back(-1);
constrPBDBuffer.prdColor.m_Data.push_back(-1);
constrPBDBuffer.sortedColor.m_Data.push_back(-1);
constrPBDBuffer.constraintType.m_Data.push_back(ANCHOR);
constrPBDBuffer.stiffness.m_Data.push_back(1.0);
indices.m_Data.push_back(a == 0 ? 0 : resY - 1);
primList.m_Data.push_back(p);
sortedPrimID.m_Data.push_back(prims);
std::cout << "primList:" << primList.GetSize() << std::endl;
std::cout << "indices:" << indices.m_Data[indices.GetSize()] << std::endl;
}
*/
}
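// Illustrative note (not from the original source): constraints live in a flat indices
// buffer plus a primList of (start, count) pairs. Distance constraints have count == 2, so
// constraint i reads its particle ids from indices[primList[i].x] and indices[primList[i].x + 1],
// while restLengthBuffer[i], stiffnessBuffer[i] and constraintType[i] hold its per-constraint
// data. E.g. primList[3] = (6, 2) means constraint 3 connects indices[6] and indices[7].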
void ConstraintPBD::InitBendingConstr()
{
//todo
//init restAngleBuffer;
/*
for (auto edge : edge2primMap)
{
//edge2primMap numEdge*2
// edge 2 prim
// int2 List;
}
*/
}
void ConstraintPBD::InitAnchorConstr(BufferVector3f& meshPosBuffer, float stiffness, int resY)
{
int currPrimSize = sortedPrimId.GetSize();
// std::cout << __FUNCTION__ << std::endl;
for (int a = 0; a < 2; a++)
{
int idx = (a == 0 ? 0 : resY - 1);
// init primList
int2 p;
p.x = topol.indices.GetSize();
p.y = 1;
topol.primList.m_Data.push_back(p);
// init indices
topol.indices.m_Data.push_back(idx);
// init restPosBuffer
restPosBuffer.m_Data.push_back(meshPosBuffer.m_Data[idx]);
// init stiffnessBuffer
stiffnessBuffer.m_Data.push_back(stiffness);
// init constraintType
constraintType.m_Data.push_back(ANCHOR);
//init color prdColor sortedPrimID
color.m_Data.push_back(stiffness);
prdColor.m_Data.push_back(stiffness);
sortedPrimId.m_Data.push_back(currPrimSize + a);
/*std::cout << "primList:" << primList.GetSize() << std::endl;
std::cout << "indices:" << indices.m_Data[indices.GetSize()] << std::endl;*/
}
}
void ConstraintPBD::GenePoint2PrimsMap(Topology topol)
{
auto primList = &(topol.primList);
auto indices = &(topol.indices);
for (int primId = 0; primId < primList->GetSize(); ++primId)
{
int2 currPrim = primList->m_Data[primId];
for (int i = 0; i < currPrim.y; ++i)
{
Point2PrimsMap[indices->m_Data[currPrim.x + i]].push_back(primId);
}
}
// printf("generated point2prims map\n");
}
void ConstraintPBD::GenePrim2PrimsMap(Topology topol)
{
auto primList = &(topol.primList);
auto indices = &(topol.indices);
for (int primId = 0; primId < primList->GetSize(); ++primId)
{
//std::set<int> linkedPrimsSet;
std::vector<int> linkedPrimsSet;
int nptPrims = primList->m_Data[primId].y;
for (int ptId = 0; ptId < nptPrims; ++ptId)
{
int currPtId = indices->m_Data[primList->m_Data[primId].x + ptId];
// printf("primId: %d; ", primId);
auto linkedPrims = Point2PrimsMap[currPtId];
// printf("linked primtive id: ");
for (int nlp = 0; nlp < linkedPrims.size(); nlp++)
{
int linkPrimId = linkedPrims[nlp];
if (linkPrimId == primId)
continue;
linkedPrimsSet.push_back(linkPrimId);
}
}
int startIdx = Prim2PrimsMap.indices.GetSize();
Prim2PrimsMap.indices.m_Data.insert(
std::end(Prim2PrimsMap.indices.m_Data),
std::begin(linkedPrimsSet),
std::end(linkedPrimsSet));
Prim2PrimsMap.startNumList.m_Data.push_back(make_int2(startIdx, linkedPrimsSet.size()));
}
//printf("generated prim2prims map\n");
}
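// Illustrative note (not from the original source): the two maps above build the constraint
// graph used for edge coloring. Point2PrimsMap lists, per particle, the constraints touching
// it; Prim2PrimsMap then flattens, per constraint, all other constraints sharing at least one
// particle (CSR-style: a flat indices buffer plus (start, count) pairs). Example with
// constraints 0:(a,b) and 1:(b,c): Point2PrimsMap[b] = {0, 1}, so 0 lists 1 as a neighbour and
// vice versa. A neighbour reached through several shared particles is pushed once per shared
// particle; the duplicates are harmless for the coloring.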
void ConstraintPBD::AssignColorsCPU()
{
auto p2pIndices = &(Prim2PrimsMap.indices);
auto p2pStartNumList = &(Prim2PrimsMap.startNumList);
for (int idx = 0; idx < topol.primList.GetSize(); idx++)
{
if (idx == 0)
detectConflict = 0;
if (color.m_Data[idx] >= 0)
continue;
int nidx = p2pStartNumList->m_Data[idx].x;
//int nlast = p2pStartNumList->m_Data[idx].x + p2pStartNumList->m_Data[idx].y;
int nlast = p2pStartNumList->m_Data[idx].x + p2pStartNumList->m_Data[idx].y;
int c = -1, offset = 0;
while (c < 0)
{
// Bit flag to store seen neighbor colors.
unsigned long forbidden = 0;
for (int i = nidx; i < nlast; ++i)
{
int n = p2pIndices->m_Data[i];
int nc = color.m_Data[n] - offset;
if (nc >= 0 && nc < FORBIDBITS)
forbidden |= (1ul << nc);
}
// Check if there's an open color in the current bitrange.
if (forbidden ^ MAXFORBID)
{
unsigned long x = forbidden;
c = offset;
// Find position of first zero bit.
x = (~x) & (x + 1);
// Color is log2(x)
while (x > 1)
{
x /= 2;
c++;
}
}
else
{
// Otherwise we need to try again with the next range of colors.
offset += FORBIDBITS;
}
}
// Record speculative coloring.
prdColor.m_Data[idx] = c;
}
}
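// Illustrative note (not from the original source): the greedy color pick above collects the
// neighbours' colors in a bitmask and takes the lowest unused one:
//   x = (~forbidden) & (forbidden + 1)  isolates the lowest zero bit of "forbidden",
// and the trailing while-loop computes log2(x). Worked example: neighbour colors {0, 2} give
// forbidden = 0b101, so x = 0b010 and the constraint speculatively takes color 1. If all
// FORBIDBITS colors of the current window are taken, the search continues at offset + FORBIDBITS.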
void ConstraintPBD::ResolveConflictsCPU()
{
auto primList = &(topol.primList);
auto p2pIndices = &(Prim2PrimsMap.indices);
auto p2pStartNumList = &(Prim2PrimsMap.startNumList);
for (int idx = 0; idx < primList->GetSize(); ++idx)
{
// Nothing to do if already colored.
if (color.m_Data[idx] >= 0)
continue;
int nidx = p2pStartNumList->m_Data[idx].x;
int nlast = p2pStartNumList->m_Data[idx].x + p2pStartNumList->m_Data[idx].y;
int c = prdColor.m_Data[idx];
//int c = newcolor[idx];
int conflict = 0;
int npt = primList->m_Data[idx].y;
for (int i = nidx; i < nlast; ++i)
{
int n = p2pIndices->m_Data[i];
int pc = prdColor.m_Data[n];
// Check for conflict.
if (pc == c)
{
int nnpt = primList->m_Data[n].y;
// Resolution gives preference to primitives with more points,
// otherwise the one that comes first.
// (NOTE: We color in fewer iterations if we prioritize by number
// of graph neighbors, but then assuming we process prims by color,
// we're usually farther away from original point order leading to many
// cache misses and slower downstream processing.)
if (nnpt > npt ||
(nnpt == npt && n < idx))
{
conflict = 1;
break;
}
}
}
// If there's a conflict then reset sizes for more work,
// otherwise accept speculative color.
if (conflict)
{
detectConflict = primList->GetSize();
break;
}
else
color.m_Data[idx] = c;
}
}
// kernel functions for PBDObj class
void __global__ AssignColorsGPU(
int2* topolPrimList,
int* p2pIndices,
int2* p2pStartNumList,
int* color,
int* prdColor,
int* detectConflict,
int worksetLength)
{
int primId = blockIdx.x * blockDim.x + threadIdx.x;
// if(primId < 5) printf("%d: entering kernel1\n", primId);
if (primId >= worksetLength)
return;
if (primId == 0)
*detectConflict = 0;
if (color[primId] >= 0)
return;
int nidx = p2pStartNumList[primId].x;
int nlast = p2pStartNumList[primId].x + p2pStartNumList[primId].y;
int c = -1, offset = 0;
while (c < 0)
{
// Bit flag to store seen neighbor colors.
unsigned long forbidden = 0;
for (int i = nidx; i < nlast; ++i)
{
int n = p2pIndices[i];
int nc = color[n] - offset;
if (nc >= 0 && nc < FORBIDBITS)
forbidden |= (1ul << nc);
}
// Check if there's an open color in the current bitrange.
if (forbidden ^ MAXFORBID)
{
unsigned long x = forbidden;
c = offset;
// Find position of first zero bit.
x = (~x) & (x + 1);
// Color is log2(x)
while (x > 1)
{
x /= 2;
c++;
}
}
else
{
// Otherwise we need to try again with the next range of colors.
offset += FORBIDBITS;
}
}
// Record speculative coloring.
prdColor[primId] = c;
// if (primId < 5) printf("\tAssignColorsGPU-%d: prdColor %d\n", primId, prdColor[primId]);
}
void __global__ ResolveConflictsGPU(
int2* topolPrimList,
int* p2pIndices,
int2* p2pStartNumList,
int* color,
int* prdColor,
int* detectConflict,
int worksetLength)
{
int primId = blockIdx.x * blockDim.x + threadIdx.x;
// if(primId < 5) printf("\tResolveConflictsGPU-%d: prdColor %d\n", primId, prdColor[primId]);
if (primId >= worksetLength)
return;
if (color[primId] >= 0)
return;
int nidx = p2pStartNumList[primId].x;
int nlast = p2pStartNumList[primId].x + p2pStartNumList[primId].y;
int c = prdColor[primId];
int conflict = 0;
int npt = topolPrimList[primId].y;
for (int i = nidx; i < nlast; ++i)
{
int n = p2pIndices[i];
int pc = prdColor[n];
// Check for conflict.
if (pc == c)
{
int nnpt = topolPrimList[n].y;
// Resolution gives preference to primitives with more points,
// otherwise the one that comes first.
// (NOTE: We color in fewer iterations if we prioritize by number
// of graph neighbors, but then assuming we process prims by color,
// we're usually farther away from original point order leading to many
// cache misses and slower downstream processing.)
if (nnpt > npt ||
(nnpt == npt && n < primId))
{
conflict = 1;
break;
}
}
}
// If there's a conflict then reset sizes for more work,
// otherwise accept speculative color.
if (conflict)
*detectConflict = worksetLength;
else
color[primId] = c;
}
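// Illustrative note (not from the original source): when two neighbouring constraints
// speculate the same color, only one keeps it. The loser is the one with fewer points or, on
// a tie, the larger index; it stays uncolored and retries in the next
// AssignColors/ResolveConflicts round. Because the highest-priority constraint in every
// conflict always commits, each round makes progress and the coloring terminates.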
void ConstraintPBD::EdgeColoring(int iterations)
{
switch (ht)
{
case CPU:
EdgeColoringCPU(iterations);
break;
case GPU:
EdgeColoringGPU(iterations);
break;
default:
break;
}
}
void ConstraintPBD::EdgeColoringCPU(int iterations)
{
for (int i = 0; i < iterations; ++i)
{
/*printf("Before iteration %d color: ", i);
for (int i = 0; i < constrPBDBuffer.color.GetSize(); ++i)
{
printf("%d // ", constrPBDBuffer.color.m_Data[i]);
}
printf("\n");
printf("iteration %d prd color: ", i);
for (int i = 0; i < constrPBDBuffer.color.GetSize(); ++i)
{
printf("%d // ", constrPBDBuffer.color.m_Data[i]);
}
printf("\n");*/
AssignColorsCPU();
ResolveConflictsCPU();
/*printf("After iteration %d color: ", i);
for (int i = 0; i < constrPBDBuffer.color.GetSize(); ++i)
{
printf("%d // ", constrPBDBuffer.color.m_Data[i]);
}
printf("\n");
printf("iteration %d prd color: ", i);
for (int i = 0; i < constrPBDBuffer.color.GetSize(); ++i)
{
printf("%d // ", constrPBDBuffer.color.m_Data[i]);
}
printf("\n");
if (detectConflict == 0)
break;
string fileName = "D:/colorEdge/color." + to_string(i) + ".cache";
IO::SaveBuffer(constrPBDBuffer.prdColor, fileName);
cout << "color saved" << endl;
printf("\n");*/
}
}
void ConstraintPBD::EdgeColoringGPU(int iterations)
{
auto primList = &(topol.primList);
int primNum = primList->GetSize();
auto p2pIndices = &(Prim2PrimsMap.indices);
auto p2pStartNumList = &(Prim2PrimsMap.startNumList);
uint2 blockSize = primList->EvalBlockSize(512);
//printf("Edge Color GPU: block dim = %d, thread dim = %d\n", blockSize.x, blockSize.y);
//// Host To Device: load buffers for Edge Coloring
//primList->MallocAndLoadToDevice(); // topolPrimList
//p2pIndices->MallocAndLoadToDevice(); // p2pIndices
//p2pStartNumList->MallocAndLoadToDevice(); // p2pStartNumList
//color->MallocAndLoadToDevice(); // color
//prdColor->MallocAndLoadToDevice(); // prdColor
//
//cudaMalloc((void **)&dFlag, sizeof(int));
//cudaError_t cudaStatus = cudaMemcpy(dFlag, &detectConflict, sizeof(int), cudaMemcpyHostToDevice);
//if(cudaStatus == cudaSuccess) printf("\nMalloc and loads succeeded!\n");
// edge coloring SPMD
for (int i = 0; i < iterations; ++i)
{
AssignColorsGPU << < blockSize.x, blockSize.y >> > ((int2*)primList->GetDevicePtr(),
(int*)p2pIndices->GetDevicePtr(),
(int2*)p2pStartNumList->GetDevicePtr(),
(int*)color.GetDevicePtr(),
(int*)prdColor.GetDevicePtr(),
dFlag,
primNum);
ResolveConflictsGPU << < blockSize.x, blockSize.y >> > ((int2*)primList->GetDevicePtr(),
(int*)p2pIndices->GetDevicePtr(),
(int2*)p2pStartNumList->GetDevicePtr(),
(int*)color.GetDevicePtr(),
(int*)prdColor.GetDevicePtr(),
dFlag,
primNum);
}
// Device To Host: load buffers back
color.LoadToHost();
/*if (color.LoadToHost())
printf("Edge Color GPU: Load color Back Succeeded!\n");*/
/*printf("\nAfter iteration(color): ");
for (int i = 0; i < constrPBDBuffer.color.GetSize(); ++i)
{
printf("%d // ", constrPBDBuffer.color.m_Data[i]);
}
printf("\n");*/
// Free GPU memory
/*cudaFree(primList->GetDevicePtr());
cudaFree(p2pIndices->GetDevicePtr());
cudaFree(p2pStartNumList->GetDevicePtr());
cudaFree(color->GetDevicePtr());
cudaFree(prdColor->GetDevicePtr());
cudaFree(dFlag);*/
}
void ConstraintPBD::SortEdgesColors()
{
// cout << "--------" << __FUNCTION__ << "--------" << endl;
/*for (int i = 0; i < color->GetSize(); ++i)
{
printf("%d - ", color->m_Data[i]);
}
printf("\n");
for (int i = 0; i < sortedPrimID->GetSize(); ++i)
{
printf("%d - ", sortedPrimID->m_Data[i]);
}
printf("\n");*/
// cout << __FUNCDNAME__ << endl;
thrust::sort_by_key(color.m_Data.begin(), color.m_Data.end(), sortedPrimId.m_Data.begin());
auto dColor = &(color);
auto dSortedPrimId = &(sortedPrimId);
dColor->LoadToDevice();
dSortedPrimId->LoadToDevice();
/*for (int i = 0; i < color.GetSize(); ++i)
{
printf("color : %d - ", color.m_Data[i]);
}
printf("\n");
for (int i = 0; i < sortedPrimId.GetSize(); ++i)
{
printf("sortedPrimId: %d - ", sortedPrimId.m_Data[i]);
}
printf("\n");*/
/*for (int i = 0; i < sortedColor->GetSize(); ++i)
{
printf("%d - ", sortedColor->m_Data[i]);
}
printf("\n");
printf("\n");
for (int i = 0; i < color->GetSize(); ++i)
{
printf("%d - ", color->m_Data[i]);
}
printf("\n");*/
}
void ConstraintPBD::EvalWorksets()
{
//cout << "--------" << __FUNCTION__ << "--------" << endl;
colorWorksets.m_Data.clear();
int count = 1;
for (int i = 1; i < color.GetSize(); ++i)
{
if (i == color.GetSize() - 1 && color.m_Data[i] == color.m_Data[i - 1])
{
count++;
colorWorksets.m_Data.push_back(make_int2(i - count + 1, count));
}
else if (i == color.GetSize() - 1 && color.m_Data[i] != color.m_Data[i - 1])
{
colorWorksets.m_Data.push_back(make_int2(i - count, count));
colorWorksets.m_Data.push_back(make_int2(i, 1));
}
else if (i != color.GetSize() - 1 && color.m_Data[i] != color.m_Data[i - 1])
{
colorWorksets.m_Data.push_back(make_int2(i - count, count));
count = 1;
}
else
{
count++;
}
}
/*for (int i = 0; i < sortedColor->GetSize(); ++i)
{
printf("%d - ", sortedColor->m_Data[i]);
}*/
/*for (int i = 0; i < colorWorksets.GetSize(); ++i)
{
printf("start: %d, num: %d ", colorWorksets.m_Data[i].x, colorWorksets.m_Data[i].y);
}
printf("\n");*/
}
void ConstraintPBD::Save(std::ofstream& ofs)
{
topol.Save(ofs);
prdPBuffer.SetName("prdPBuffer");
IO::SaveBuffer(prdPBuffer, ofs);
restPosBuffer.SetName("restPosBuffer"); // empty for now; initialized in initPosition, which is not used when reading data from Houdini
IO::SaveBuffer(restPosBuffer, ofs);
restLengthBuffer.SetName("restLengthBuffer");
IO::SaveBuffer(restLengthBuffer, ofs);
stiffnessBuffer.SetName("stiffnessBuffer");
IO::SaveBuffer(stiffnessBuffer, ofs);
constraintType.SetName("constraintType");
IO::SaveBuffer(constraintType, ofs);
}
// ----------------------------------------------------------------------------------------
// PBDObject class
void PBDObject::Init()
{
initMeshTopol();
initConstr();
}
void PBDObject::Init(string topolFileName, string distConstrFileName)
{
/*if ((IO::readTopolFromTxt(topolFileName, this)) && (IO::readDistConstrFromTxt(distConstrFileName, this)))
printf("PBD Object was initialized successfully\n");*/
bool readTopol = IO::ReadTopolFromTxt(topolFileName, this);
bool readConstr = IO::ReadDistConstrFromTxt(distConstrFileName, this);
if (readTopol && readConstr)
printf("PBD Object was initialized successfully\n");
}
void PBDObject::ContinueSimInit(string meshTopolPath, string constrPath, HardwareType hardwareType)
{
ht = hardwareType;
bool readTopol = IO::ReadTopolFromCache(meshTopolPath, this);
bool readConstr = IO::ReadConstraintFromCache(constrPath, this);
if (readTopol && readConstr)
printf("PBD Object was initialized successfully\n");
}
void PBDObject::SetConstrOption(uint ct, float* stiffnessSetting)
{
this->ct = ct;
this->stiffnessSetting = stiffnessSetting;
}
void PBDObject::initMeshTopol()
{
// OpenGL Topology
meshTopol.indices.SetName("Indices");
meshTopol.posBuffer.SetName("P");
meshTopol.primList.SetName("primList");
initPosition(make_float2(0.0, 0.0));
initMassVel();
initMeshTopolIndices();
}
void PBDObject::initMassVel()
{
// init mass
float3 initVel = make_float3(0.0f, 0.0f, 0.0f);
int num = resY * resX;
for (int i = 0; i < num; i++)
{
massBuffer.m_Data.push_back(1.0);
/*if (i == 0 || i == resY - 1)
{
massBuffer.m_Data.push_back(0.0);
}
else
{
massBuffer.m_Data.push_back(1.0);
}*/
velBuffer.m_Data.push_back(initVel);
}
}
void PBDObject::InitGPUBuffers()
{
// printf("init GPU buffers\n");
auto dPrimList = &(constrPBDBuffer.topol.primList);
auto dP2pIndices = &(constrPBDBuffer.Prim2PrimsMap.indices);
auto dP2pStartNumList = &(constrPBDBuffer.Prim2PrimsMap.startNumList);
auto dColor = &(constrPBDBuffer.color);
auto dPrdColor = &(constrPBDBuffer.prdColor);
auto dsortedPrimId = &(constrPBDBuffer.sortedPrimId);
auto dConstrType = &(constrPBDBuffer.constraintType);
auto dVelBuffer = &(velBuffer);
auto dPrdPBuffer = &(constrPBDBuffer.prdPBuffer);
auto dPositionBuffer = &(meshTopol.posBuffer);
auto dMassBuffer = &(massBuffer);
auto dRestLengthBuffer = &(constrPBDBuffer.restLengthBuffer);
auto dStiffnessBuffer = &(constrPBDBuffer.stiffnessBuffer);
auto dRestPosBuffer = &(constrPBDBuffer.restPosBuffer);
auto dIndices = &(constrPBDBuffer.topol.indices);
// Host To Device: load buffers for Edge Coloring
cudaMalloc((void**)&constrPBDBuffer.dFlag, sizeof(int));
cudaError_t cudaStatus = cudaMemcpy(constrPBDBuffer.dFlag, &(constrPBDBuffer.detectConflict), sizeof(int), cudaMemcpyHostToDevice);
//if (cudaStatus == cudaSuccess) printf("\nMalloc and loads succeeded!\n");
dPrimList->MallocAndLoadToDevice(); // topolPrimList
dP2pIndices->MallocAndLoadToDevice(); // p2pIndices
dP2pStartNumList->MallocAndLoadToDevice(); // p2pStartNumList
dColor->MallocAndLoadToDevice(); // color
dPrdColor->MallocAndLoadToDevice(); // prdColor
dConstrType->MallocAndLoadToDevice(); // constrType
dsortedPrimId->DeviceMalloc();
dVelBuffer->MallocAndLoadToDevice(); // velocity Buffer
dPrdPBuffer->MallocAndLoadToDevice(); // predicted position buffer
dPositionBuffer->MallocAndLoadToDevice(); // point real position buffer
dMassBuffer->MallocAndLoadToDevice();
dRestLengthBuffer->MallocAndLoadToDevice();
dStiffnessBuffer->MallocAndLoadToDevice();
dRestPosBuffer->MallocAndLoadToDevice();
dIndices->MallocAndLoadToDevice();
}
void PBDObject::freeGPUBuffers()
{
auto dPrimList = &(constrPBDBuffer.topol.primList);
auto dP2pIndices = &(constrPBDBuffer.Prim2PrimsMap.indices);
auto dP2pStartNumList = &(constrPBDBuffer.Prim2PrimsMap.startNumList);
auto dColor = &(constrPBDBuffer.color);
auto dPrdColor = &(constrPBDBuffer.prdColor);
auto dSortedPrimId = &(constrPBDBuffer.sortedPrimId);
auto dConstrType = &(constrPBDBuffer.constraintType);
auto dVelBuffer = &(velBuffer);
auto dPrdPBuffer = &(constrPBDBuffer.prdPBuffer);
auto dPositionBuffer = &(meshTopol.posBuffer);
auto dMassBuffer = &(massBuffer);
auto dRestLengthBuffer = &(constrPBDBuffer.restLengthBuffer);
auto dStiffnessBuffer = &(constrPBDBuffer.stiffnessBuffer);
auto dRestPosBuffer = &(constrPBDBuffer.restPosBuffer);
auto dIndices = &(constrPBDBuffer.topol.indices);
dPositionBuffer->LoadToHost();
dVelBuffer->LoadToHost();
cudaFree(constrPBDBuffer.dFlag);
cudaFree(dPrimList->GetDevicePtr());
cudaFree(dP2pIndices->GetDevicePtr());
cudaFree(dP2pStartNumList->GetDevicePtr());
cudaFree(dColor->GetDevicePtr());
cudaFree(dPrdColor->GetDevicePtr());
cudaFree(dSortedPrimId->GetDevicePtr());
cudaFree(dConstrType->GetDevicePtr());
cudaFree(dVelBuffer->GetDevicePtr());
cudaFree(dPrdPBuffer->GetDevicePtr());
cudaFree(dPositionBuffer->GetDevicePtr());
cudaFree(dMassBuffer->GetDevicePtr());
cudaFree(dRestLengthBuffer->GetDevicePtr());
cudaFree(dStiffnessBuffer->GetDevicePtr());
cudaFree(dRestPosBuffer->GetDevicePtr());
cudaFree(dIndices->GetDevicePtr());
}
// init : allocation
// setvalue
void PBDObject::initConstr()
{
constrPBDBuffer.ht = ht;
constrPBDBuffer.topol.posBuffer = meshTopol.posBuffer;
constrPBDBuffer.prdPBuffer = meshTopol.posBuffer;
constrPBDBuffer.topol.primList.SetName("primList");
constrPBDBuffer.topol.indices.SetName("Indices");
constrPBDBuffer.color.SetName("color");
constrPBDBuffer.prdColor.SetName("prdcolor");
constrPBDBuffer.sortedPrimId.SetName("sortedPrimId");
if ((DISTANCE & ct) == DISTANCE)
{
constrPBDBuffer.InitDistanceConstr(meshTopol.posBuffer, stiffnessSetting[0], resY, resX);
}
if ((BENDING & ct) == BENDING)
{
constrPBDBuffer.InitBendingConstr();
}
if ((ANCHOR & ct) == ANCHOR)
{
constrPBDBuffer.InitAnchorConstr(meshTopol.posBuffer, -1.0f, resY);
}
constrPBDBuffer.GenePoint2PrimsMap(constrPBDBuffer.topol);
constrPBDBuffer.GenePrim2PrimsMap(constrPBDBuffer.topol);
if (ht == GPU)
{
InitGPUBuffers();
constrPBDBuffer.EdgeColoring(20000);
}
}
void PBDObject::initPosition(float2 cord)
{
auto positionBuffer = &(meshTopol.posBuffer);
float lengthInterval = sizeX / (resX - 1);
float heightInterval = sizeY / (resY - 1);
int num = resY * resX;
int index = 0;
for (int i = 0; i < resY; i++)
{
for (int j = 0; j < resX; j++)
{
float3 p;
p.x = cord.x + j * lengthInterval;
p.y = 0;
p.z = cord.y + i * heightInterval;
positionBuffer->m_Data.push_back(p);
index++;
}
}
constrPBDBuffer.restPosBuffer.m_Data.push_back(positionBuffer->m_Data[0]);
constrPBDBuffer.restPosBuffer.m_Data.push_back(positionBuffer->m_Data[resX - 1]);
}
void PBDObject::initMeshTopolIndices()
{
auto meshTopolIndicies = &(meshTopol.indices);
int num = resY * resX;
for (int i = 0; i < num - resX; i++)
{
if (i % resX == resX - 1)
continue;
meshTopolIndicies->m_Data.push_back(i);
meshTopolIndicies->m_Data.push_back(i + resX);
meshTopolIndicies->m_Data.push_back(i + resX + 1);
meshTopolIndicies->m_Data.push_back(i);
meshTopolIndicies->m_Data.push_back(i + resX + 1);
meshTopolIndicies->m_Data.push_back(i + 1);
}
}
void PBDObject::groundTruthTest()
{
vector<int> arr0 = { 0, 1, 3, 3, 2, 0 };
vector<int> arr1 = { 0,3,0,1,0,2,1,3,2,3 };
constrPBDBuffer.topol.indices.m_Data = arr1;
vector<int2> arr2 = { make_int2(0,2), make_int2(2,2), make_int2(4,2), make_int2(6,2), make_int2(8,2) };
constrPBDBuffer.topol.primList.m_Data = arr2;
vector<int> arr3 = { -1,-1,-1,-1,-1 };
constrPBDBuffer.color.m_Data = arr3;
constrPBDBuffer.prdColor.m_Data = arr3;
}
void PBDObject::Save(string path)
{
PBD_DEBUG;
std::ofstream ofs(path);
if (!ofs.is_open())
return;
ofs << "Header|float3Buffer,6;floatBuffer,2;int2Buffer,2;intBuffer,3;float3,1;float,1" << endl; //HEADER
meshTopol.indices.SetName("meshTopol indices");
meshTopol.posBuffer.SetName("meshTopol posBuffer");
meshTopol.primList.SetName("meshTopol primList");
meshTopol.Save(ofs);
constrPBDBuffer.Save(ofs);
velBuffer.SetName("velBuffer");
IO::SaveBuffer(velBuffer, ofs);
massBuffer.SetName("massBuffer");
IO::SaveBuffer(massBuffer, ofs);
IO::SaveData(dampingRate, "dampingRate", ofs);
IO::SaveData(gravity, "gravity", ofs);
ofs.flush();
ofs.close();
}
void PBDObject::SaveMeshTopol(string path)
{
PBD_DEBUG;
std::ofstream ofs(path);
if (!ofs.is_open())
return;
//ofs << "Header|float3Buffer,3;int2Buffer,1;intBuffer,1;float3,1;float,1" << endl; //HEADER
meshTopol.Save(ofs);
velBuffer.SetName("velBuffer");
IO::SaveBuffer(velBuffer, ofs);
massBuffer.SetName("massBuffer");
IO::SaveBuffer(massBuffer, ofs);
IO::SaveData(dampingRate, "dampingRate", ofs);
IO::SaveData(gravity, "gravity", ofs);
ofs.flush();
ofs.close();
}
void PBDObject::SaveConstraint(string path)
{
PBD_DEBUG;
std::ofstream ofs(path);
if (!ofs.is_open())
return;
//ofs << "Header|float3Buffer,3;floatBuffer,2;int2Buffer,1;intBuffer,2" << endl; //HEADER
constrPBDBuffer.Save(ofs);
ofs.flush();
ofs.close();
}
void PBDObject::Read(string path)
{
}
// ----------------------------------------------------------------------------------------
// SolverPBD class
// kernel functions for SolverPBD class
void __global__ AdvectGPUKernel(
int pointNum,
float dt,
float dampingRate,
float3 gravity,
float* mass,
float3* velBuffer,
float3* prdPBuffer,
float3* positionBuffer)
{
int pointId = blockIdx.x * blockDim.x + threadIdx.x;
// if(primId < 5) printf("\tResolveConflictsGPU-%d: prdColor %d\n", primId, prdColor[primId]);
if (pointId >= pointNum)
return;
/*if (i == 30)
printf("old velocity Buffer: %f, %f, %f \n", velBuffer->m_Data[i].x, velBuffer->m_Data[i].y, velBuffer->m_Data[i].z);*/
velBuffer[pointId] += gravity * dt * mass[pointId];
/*if(i == 30)
printf("new velocity Buffer: %f, %f, %f \n", velBuffer->m_Data[i].x, velBuffer->m_Data[i].y, velBuffer->m_Data[i].z);*/
velBuffer[pointId] *= powf(dampingRate, dt);
prdPBuffer[pointId] = positionBuffer[pointId] + velBuffer[pointId] * dt;
//printf("postion Buffer: %f, %f, %f \n", prdPBuffer.m_Data[j].x, prdPBuffer.m_Data[j].y, prdPBuffer.m_Data[j].z);
/*printf("Advect: prdPBuffer: \n");
printf("(%f,%f,%f)", prdPBuffer[pointId].x, prdPBuffer[pointId].y, prdPBuffer[pointId].z);*/
}
void SolverPBD::Advect(float dt)
{
PBD_DEBUG;
switch (m_ht) // TODO: change back to ht
{
case CPU:
advectCPU(dt);
break;
case GPU:
advectGPU(dt);
break;
default:
break;
}
}
void SolverPBD::advectCPU(float dt)
{
auto velBuffer = &(m_pbdObj->velBuffer);
auto prdPBuffer = &(m_pbdObj->constrPBDBuffer.prdPBuffer);
auto positionBuffer = &(m_pbdObj->meshTopol.posBuffer);
for (int i = 0; i < velBuffer->GetSize(); i++)
{
velBuffer->m_Data[i] += m_pbdObj->gravity * dt;
prdPBuffer->m_Data[i] = positionBuffer->m_Data[i] + velBuffer->m_Data[i] * dt;
}
}
void SolverPBD::advectGPU(float dt)
{
auto velBuffer = &(m_pbdObj->velBuffer);
auto prdPBuffer = &(m_pbdObj->constrPBDBuffer.prdPBuffer);
auto positionBuffer = &(m_pbdObj->meshTopol.posBuffer);
auto massBuffer = &(m_pbdObj->massBuffer);
//printf("Before Advect GPU:");
//printf("point 0: %f, %f, %f; point col-1: %f, %f, %f\n",
// prdPBuffer->m_Data[0].x, prdPBuffer->m_Data[0].y, prdPBuffer->m_Data[0].z,
// prdPBuffer->m_Data[pbdObj->resY-1].x, prdPBuffer->m_Data[pbdObj->resY - 1].y,
// prdPBuffer->m_Data[pbdObj->resY - 1].z );
int pointNum = prdPBuffer->GetSize();
float dampingRate = m_pbdObj->dampingRate;
float3 gravity = m_pbdObj->gravity;
uint2 blockSize = positionBuffer->EvalBlockSize(512);
//printf("Advect GPU: block dim = %d, thread dim = %d\n", blockSize.x, blockSize.y);
// Host To Device: load buffers for Advect
// edge coloring SPMD
AdvectGPUKernel << < blockSize.x, blockSize.y >> > (pointNum,
dt,
dampingRate,
gravity,
(float*)massBuffer->GetDevicePtr(),
(float3*)velBuffer->GetDevicePtr(),
(float3*)prdPBuffer->GetDevicePtr(),
(float3*)positionBuffer->GetDevicePtr());
// Device To Host: load buffers back
//if (prdPBuffer->LoadToHost())
// printf("Advect GPU: Load prdPBuffer Back Succeeded!\n");
//if (velBuffer->LoadToHost())
// printf("Advect GPU: Load velBuffer Back Succeeded!\n");
//printf("After Advect GPU:");
//printf("point 0: %f, %f, %f; point col-1: %f, %f, %f\n",
// prdPBuffer->m_Data[0].x, prdPBuffer->m_Data[0].y, prdPBuffer->m_Data[0].z,
// prdPBuffer->m_Data[pbdObj->resY - 1].x, prdPBuffer->m_Data[pbdObj->resY - 1].y,
// prdPBuffer->m_Data[pbdObj->resY - 1].z);
}
void SolverPBD::ProjectConstraint(SolverType st, int iterations)
{
m_pbdSolverTimer->Tick();
switch (m_ht)
{
case CPU:
projectConstraintCPU(st, iterations);
break;
case GPU:
projectConstraintGPU(st, iterations);
break;
default:
break;
}
m_pbdSolverTimer->Tock();
PBD_DEBUGTIME(m_pbdSolverTimer->GetFuncTime());
}
void SolverPBD::ProjectConstraintWithColli(SolverType st, int iterations, CollisionSolver* colliSolver,
BufferVector3f& fixedBuffer, BufferVector3f& vFixedBuffer, BufferVector3f& fFixedBuffer, int debug)
{
m_pbdSolverTimer->Tick();
switch (m_ht)
{
case CPU:
projectConstraintWithColliCPU(st, iterations, colliSolver, fixedBuffer, vFixedBuffer, fFixedBuffer, debug);
break;
case GPU:
projectConstraintGPU(st, iterations);
break;
default:
break;
}
m_pbdSolverTimer->Tock();
}
void SolverPBD::projectConstraintCPU(SolverType st, int iterations)
{
auto primList = &(m_pbdObj->constrPBDBuffer.topol.primList);
auto prdPBuffer = &(m_pbdObj->constrPBDBuffer.prdPBuffer);
auto massBuffer = &(m_pbdObj->massBuffer);
auto restLengthBuffer = &(m_pbdObj->constrPBDBuffer.restLengthBuffer);
auto stiffnessBuffer = &(m_pbdObj->constrPBDBuffer.stiffnessBuffer);
// auto restPosBuffer = &(m_pbdObj->constrPBDBuffer.restPosBuffer);
auto indices = &(m_pbdObj->constrPBDBuffer.topol.indices);
for (size_t ii = 0; ii < iterations; ii++)
{
for (size_t i = 0; i < primList->GetSize(); i++)
{
if (primList->m_Data[i].y != 2)
continue;
int i0 = indices->m_Data[primList->m_Data[i].x];
int i1 = indices->m_Data[primList->m_Data[i].x + 1];
float3 dp1;
float3 dp2;
float d = Distance(prdPBuffer->m_Data[i0], prdPBuffer->m_Data[i1]);
float3 r = prdPBuffer->m_Data[i0] - prdPBuffer->m_Data[i1];
r = normalize(r);
            dp1.x = -massBuffer->m_Data[i0] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.x;
            dp1.y = -massBuffer->m_Data[i0] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.y;
            dp1.z = -massBuffer->m_Data[i0] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.z;
            dp2.x = massBuffer->m_Data[i1] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.x;
            dp2.y = massBuffer->m_Data[i1] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.y;
            dp2.z = massBuffer->m_Data[i1] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.z;
float k = 1;// -powf(1 - stiffnessBuffer->m_Data[i], 1.0 / (ii + 1));
dp1 *= k;
dp2 *= k;
prdPBuffer->m_Data[i0] += dp1;
prdPBuffer->m_Data[i1] += dp2;
}
ColliWithShpGrd();
}
}
void SolverPBD::projectConstraintWithColliCPU(SolverType st, int iterations, CollisionSolver* colliSolver,
BufferVector3f& fixedBuffer, BufferVector3f& vFixedBuffer, BufferVector3f& fFixedBuffer, int debug)
{
PBD_DEBUG;
int debugFrameId = 1;
auto primList = &(m_pbdObj->constrPBDBuffer.topol.primList);
auto prdPBuffer = &(m_pbdObj->constrPBDBuffer.prdPBuffer);
auto massBuffer = &(m_pbdObj->massBuffer);
auto restLengthBuffer = &(m_pbdObj->constrPBDBuffer.restLengthBuffer);
auto stiffnessBuffer = &(m_pbdObj->constrPBDBuffer.stiffnessBuffer);
// auto restPosBuffer = &(m_pbdObj->constrPBDBuffer.restPosBuffer);
auto indices = &(m_pbdObj->constrPBDBuffer.topol.indices);
colliSolver->afterProjPrdpBuffer = m_pbdObj->constrPBDBuffer.prdPBuffer;
for (size_t ii = 0; ii < iterations; ii++)
{
        if ((ii % 10 == 0) || (ii == iterations - 1)) // 0 10 20 30 || == -1
{
colliSolver->CCD_SH_Extended();
//colliSolver->CCD_SH();
printf("contact size: %d\n", colliSolver->contactData.ctxs.GetSize());
}
//printInfo("--- in project", prdPBuffer->m_Data[1]);
//for (int i = 0; i <= 3; ++i)
//{
for (size_t i = 0; i < primList->GetSize(); i++)
{
if (primList->m_Data[i].y != 2)
continue;
int i0 = indices->m_Data[primList->m_Data[i].x];
int i1 = indices->m_Data[primList->m_Data[i].x + 1];
float3 dp1;
float3 dp2;
float d = Distance(prdPBuffer->m_Data[i0], prdPBuffer->m_Data[i1]);
float3 r = prdPBuffer->m_Data[i0] - prdPBuffer->m_Data[i1];
r = normalize(r);
dp1.x = -massBuffer->m_Data[i0] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.x;
dp1.y = -massBuffer->m_Data[i0] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.y;
dp1.z = -massBuffer->m_Data[i0] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.z;
dp2.x = massBuffer->m_Data[i1] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.x;
dp2.y = massBuffer->m_Data[i1] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.y;
dp2.z = massBuffer->m_Data[i1] / (massBuffer->m_Data[i0] + massBuffer->m_Data[i1]) * (d - restLengthBuffer->m_Data[i]) * r.z;
float k = 1;// -powf(1 - stiffnessBuffer->m_Data[i], 1.0 / (ii + 1));
dp1 *= k;
dp2 *= k;
prdPBuffer->m_Data[i0] += dp1;
prdPBuffer->m_Data[i1] += dp2;
}
//}
//colliSolver->CollisionResolve();
/*string beforeResolvePath = "D://0326Test//testData//testBeforeResolve." + to_string(debug * iterations + ii) + ".cache";
m_pbdObj->constrPBDBuffer.prdPBuffer.SetName("P");
Topology tempBeforeResolve;
tempBeforeResolve.indices = m_pbdObj->meshTopol.indices;
tempBeforeResolve.primList = m_pbdObj->meshTopol.primList;
tempBeforeResolve.posBuffer = m_pbdObj->constrPBDBuffer.prdPBuffer;
tempBeforeResolve.indices.SetName("Indices");
tempBeforeResolve.primList.SetName("primList");
tempBeforeResolve.posBuffer.SetName("P");
IO::SaveToplogy(tempBeforeResolve, beforeResolvePath);*/
//for (int i = 0; i <= 2; ++i)
//{
colliSolver->CollisionResolveNew(fixedBuffer, vFixedBuffer, fFixedBuffer, (debug * iterations + ii), ii, debugFrameId);
//}
/*string path = "D://0326Test//testData//test." + to_string(debug*iterations + ii) + ".cache";
m_pbdObj->constrPBDBuffer.prdPBuffer.SetName("P");
Topology temp;
temp.indices = m_pbdObj->meshTopol.indices;
temp.primList = m_pbdObj->meshTopol.primList;
temp.posBuffer = m_pbdObj->constrPBDBuffer.prdPBuffer;
temp.indices.SetName("Indices");
temp.primList.SetName("primList");
temp.posBuffer.SetName("P");
IO::SaveToplogy(temp, path);*/
//printInfo("--- after resolve", prdPBuffer->m_Data[1]);
//printf("--------------------itreation %d-------------------\n", ii);
ColliWithShpGrd();
}
}
// Attach Points
//for (size_t j = 0; j < prdPBuffer->GetSize(); j++)
//{
// //attach points
// if (j == 0)
// {
// prdPBuffer->m_Data[j] = restPosBuffer->m_Data[0];
// }
// if (j == m_pbdObj->resY - 1)
// {
// prdPBuffer->m_Data[j] = restPosBuffer->m_Data[1];
// }
// ////point collide with sphere
// //bool isCollideSphere = ColliderSphere(prdPBuffer.m_Data[j], sphereOrigin, sphereRadius, j);
// //if (isCollideSphere) //move the point to the point which intersect with sphere
// //{
// // float3 moveVector = GenerateMoveVectorSphere(sphereOrigin, sphereRadius, prdPBuffer.m_Data[j], j);
// // prdPBuffer.m_Data[j] += moveVector;
// //}
// ////point collide with ground
// //bool isCollideGoround = CollideGround(prdPBuffer.m_Data[j], groundCenter);
// //if (isCollideGoround)
// //{
// // prdPBuffer.m_Data[j].y = groundCenter.y;
// //}
//}
void __global__ ProjectContraintsGPUKernel(
int resY,
int start,
int num,
int iteration,
int* sortedPrimId,
int* indices,
int* constraintType,
float* massBuffer,
float* restLengthBuffer,
float* stiffnessBuffer,
int2* primList,
float3* prdPBuffer,
float3* restBuffer)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num)
return;
//printf("idx: %d ---- ", idx + start);
auto primId = sortedPrimId[idx + start];
//printf("idx + start: %d, primId: %d \n", idx + start, primId);
if (constraintType[primId] == DISTANCE)
{
int i0 = indices[primList[primId].x];
int i1 = indices[primList[primId].x + 1];
float3 p0 = prdPBuffer[i0];
float3 p1 = prdPBuffer[i1];
/*printf("Project Constraint: prdPBuffer: ");
printf("i0: (%f,%f,%f)", prdPBuffer[i0].x, prdPBuffer[i0].y, prdPBuffer[i0].z);
printf("i1: (%f,%f,%f)\n", prdPBuffer[i1].x, prdPBuffer[i1].y, prdPBuffer[i1].z);*/
float3 dp0;
float3 dp1;
float d = Distance(p0, p1);
float3 v;
v = p0 - p1;
//printf("mass: %f", massBuffer[i0]);
dp0 = -massBuffer[i0] / (massBuffer[i0] + massBuffer[i1]) * (d - restLengthBuffer[primId]) * v / d;
dp1 = massBuffer[i1] / (massBuffer[i0] + massBuffer[i1]) * (d - restLengthBuffer[primId]) * v / d;
float k = 1 - powf(1 - stiffnessBuffer[primId], 1.0 / (iteration + 1));
/*printf("dp0: (%f,%f,%f) ; ", dp0.x, dp0.y, dp0.z);
printf("dp1: (%f,%f,%f)", dp1.x, dp1.y, dp1.z);
printf("d: %f, k: %f \n",d, k);*/
dp0 *= k;
dp1 *= k;
prdPBuffer[i0] += dp0;
prdPBuffer[i1] += dp1;
/*printf("Project Constraint: prdPBuffer: ");
printf("i0: (%f,%f,%f)", prdPBuffer[i0].x, prdPBuffer[i0].y, prdPBuffer[i0].z);
printf("i1: (%f,%f,%f)\n", prdPBuffer[i1].x, prdPBuffer[i1].y, prdPBuffer[i1].z);*/
}
if (constraintType[primId] == ANCHOR)
{
int i = indices[primList[primId].x];
if (i == 0)
prdPBuffer[i] = restBuffer[0];
if (i == (resY - 1))
prdPBuffer[i] = restBuffer[1];
}
}
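// Note (interpretation, not authoritative): the DISTANCE branch above is the usual PBD
// projection of C(p0, p1) = |p0 - p1| - restLength,
//   dp0 = -(w0 / (w0 + w1)) * C * (p0 - p1) / |p0 - p1|
//   dp1 = +(w1 / (w0 + w1)) * C * (p0 - p1) / |p0 - p1|
// where the weights w are read from massBuffer (inverse masses in the usual formulation),
// and k = 1 - (1 - stiffness)^(1 / (iteration + 1)) is the iteration-corrected stiffness
// commonly used in PBD; the ANCHOR branch simply pins points 0 and resY - 1 to their
// stored rest positions.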
void SolverPBD::projectConstraintGPU(SolverType st, int iterations)
{
m_pbdObj->constrPBDBuffer.SortEdgesColors();
m_pbdObj->constrPBDBuffer.EvalWorksets();
// cout << "--------" << __FUNCTION__ << "--------" << endl;
auto worksets = &(m_pbdObj->constrPBDBuffer.colorWorksets);
auto primList = &(m_pbdObj->constrPBDBuffer.topol.primList);
auto sortedPrimId = &(m_pbdObj->constrPBDBuffer.sortedPrimId);
auto prdPBuffer = &(m_pbdObj->constrPBDBuffer.prdPBuffer);
auto massBuffer = &(m_pbdObj->massBuffer);
auto restLengthBuffer = &(m_pbdObj->constrPBDBuffer.restLengthBuffer);
auto stiffnessBuffer = &(m_pbdObj->constrPBDBuffer.stiffnessBuffer);
auto restPosBuffer = &(m_pbdObj->constrPBDBuffer.restPosBuffer);
auto indices = &(m_pbdObj->constrPBDBuffer.topol.indices);
auto constraintType = &(m_pbdObj->constrPBDBuffer.constraintType);
for (int i = 0; i < iterations; ++i)
{
for (auto workset : worksets->m_Data)
{
int start = workset.x;
int num = workset.y;
int numBlock = 1;
int numThread = 512;
            if (num > numThread && num < 1024)
                numThread = num;
            else if (num >= 1024)
                numBlock = (num + numThread - 1) / numThread; // integer ceiling so every constraint in the workset gets a thread
//printf("----------------------------\n");
//printf(" numBlock: %d numThread: %d\n", numBlock, numThread);
/*printf("indices: ");
for (int i = 0; i < indices->GetSize(); ++i)
{
cout << indices->m_Data[i] << "-";
}
printf("\n");
printf("constraintType: ");
for (int i = 0; i < constraintType->GetSize(); ++i)
{
cout << constraintType->m_Data[i] << "-";
}
printf("\n");
printf("massBuffer: ");
for (int i = 0; i < massBuffer->GetSize(); ++i)
{
cout << massBuffer->m_Data[i] << "-";
}
printf("\n");
printf("restLengthBuffer: ");
for (int i = 0; i < restLengthBuffer->GetSize(); ++i)
{
cout << restLengthBuffer->m_Data[i] << "-";
}
printf("\n");
printf("stiffnessBuffer: ");
for (int i = 0; i < stiffnessBuffer->GetSize(); ++i)
{
cout << stiffnessBuffer->m_Data[i] << "-";
}*/
/*printf("prdPBuffer: \n");
for (int i = 0; i < prdPBuffer->GetSize(); ++i)
{
cout << "(" << prdPBuffer->m_Data[i].x << "," << prdPBuffer->m_Data[i].y << "," << prdPBuffer->m_Data[i].z << ")" << endl;
}
printf("\n");
printf("primList: \n");
for (int i = 0; i < primList->GetSize(); ++i)
{
cout << "(" << primList->m_Data[i].x << "," << primList->m_Data[i].y << ")" << endl;
}*/
ProjectContraintsGPUKernel << <numBlock, numThread >> > (
m_pbdObj->resY,
start,
num,
i,
((int*)sortedPrimId->GetDevicePtr()),
(int*)indices->GetDevicePtr(),
(int*)constraintType->GetDevicePtr(),
(float*)massBuffer->GetDevicePtr(),
(float*)restLengthBuffer->GetDevicePtr(),
(float*)stiffnessBuffer->GetDevicePtr(),
(int2*)primList->GetDevicePtr(),
(float3*)prdPBuffer->GetDevicePtr(),
(float3*)restPosBuffer->GetDevicePtr());
/*cudaError_t cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
}*/
}
}
//{
// // TODO: edge coloring
// /*
// int* color
// int* sortedColor
// int* sortedPrim
// int main
// {
// sort(color, sortedColor, sortedPrim)
// //color: 11220001110222
// //sortedColor:000111222
// //sortedPrim:
// int2 workSets = Eval(SortedColor)
// //workSets.x start
// //workSets.y num
// for(auto workset: workSets)
// {
// int start = workSets.x
// int num = workSets.y
// kernel project<<<numBlock, numThread>>>(num, sortedPrim+start, prdPbuffer, restlength)
// {
// if(index > = num)
// return;
// int2 prim = sortedPrimId[index]
// int i0 = primList[prim.x]
// int i1 = primList[prim.x + 1]
// float3 p0 = prdPbuffer[i0]
// float3 p1 = prdPbuffer[i1]
// }
// }
// }
//}
}
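// Illustration of the commented pseudocode above (hypothetical data): if the per-constraint
// colors come back as 1,1,2,2,0,0,0,1,1,1,0,2,2,2, sorting yields
// 0,0,0,0,1,1,1,1,1,2,2,2,2,2 together with the matching sortedPrimId permutation, and the
// color worksets are the (start, num) pairs (0,4), (4,5) and (9,5); each pair is one
// conflict-free batch that ProjectContraintsGPUKernel can process fully in parallel.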
void SolverPBD::Integration(float dt)
{
PBD_DEBUG;
switch (m_ht) // TODO: change back to ht
{
case CPU:
integrationCPU(dt);
break;
case GPU:
integrationGPU(dt);
break;
default:
break;
}
}
void SolverPBD::integrationCPU(float dt)
{
auto positionBuffer = &(m_pbdObj->meshTopol.posBuffer);
auto velBuffer = &(m_pbdObj->velBuffer);
auto prdPBuffer = &(m_pbdObj->constrPBDBuffer.prdPBuffer);
for (size_t i = 0; i < positionBuffer->GetSize(); i++)
{
velBuffer->m_Data[i] = (prdPBuffer->m_Data[i] - positionBuffer->m_Data[i]) / dt;
positionBuffer->m_Data[i] = prdPBuffer->m_Data[i];
}
}
void __global__ IntegrationGPUKernel(
int pointNum,
float dt,
float3* velBuffer,
float3* prdPBuffer,
float3* positionBuffer)
{
int pointId = blockIdx.x * blockDim.x + threadIdx.x;
// if(primId < 5) printf("\tResolveConflictsGPU-%d: prdColor %d\n", primId, prdColor[primId]);
if (pointId >= pointNum)
return;
velBuffer[pointId] = (prdPBuffer[pointId] - positionBuffer[pointId]) / dt;
positionBuffer[pointId] = prdPBuffer[pointId];
}
// TODO
void SolverPBD::integrationGPU(float dt)
{
auto positionBuffer = &(m_pbdObj->meshTopol.posBuffer);
auto velBuffer = &(m_pbdObj->velBuffer);
auto prdPBuffer = &(m_pbdObj->constrPBDBuffer.prdPBuffer);
/*printf("Before Integration GPU:");
printf("point 0: %f, %f, %f; point col-1: %f, %f, %f\n",
prdPBuffer->m_Data[0].x, prdPBuffer->m_Data[0].y, prdPBuffer->m_Data[0].z,
prdPBuffer->m_Data[pbdObj->resY - 1].x, prdPBuffer->m_Data[pbdObj->resY - 1].y,
prdPBuffer->m_Data[pbdObj->resY - 1].z);*/
int pointNum = prdPBuffer->GetSize();
uint2 blockSize = positionBuffer->EvalBlockSize(512);
//printf("Integration GPU: block dim = %d, thread dim = %d\n", blockSize.x, blockSize.y);
IntegrationGPUKernel << < blockSize.x, blockSize.y >> > (pointNum,
dt,
(float3*)velBuffer->GetDevicePtr(),
(float3*)prdPBuffer->GetDevicePtr(),
(float3*)positionBuffer->GetDevicePtr());
// Device To Host: load buffers back
/*if (positionBuffer->LoadToHost())
printf("Integration GPU: Load positionBuffer Back Succeeded!\n");
if (velBuffer->LoadToHost())
printf("Integration GPU: Load velBuffer Back Succeeded!\n");*/
//printf("After Integration GPU:");
//printf("point 0: %f, %f, %f; point col-1: %f, %f, %f\n",
// positionBuffer->m_Data[0].x, positionBuffer->m_Data[0].y, positionBuffer->m_Data[0].z,
// positionBuffer->m_Data[pbdObj->resY - 1].x, positionBuffer->m_Data[pbdObj->resY - 1].y,
// positionBuffer->m_Data[pbdObj->resY - 1].z);
}
void SolverPBD::ColliWithShpGrd()
{
auto posBuffer = &(m_pbdObj->meshTopol.posBuffer);
auto prdPBuffer = &(m_pbdObj->constrPBDBuffer.prdPBuffer);
for (int vtxId = 0; vtxId < posBuffer->GetSize(); ++vtxId)
{
//point collide with sphere
bool isCollideSphere = ColliderSphere(prdPBuffer->m_Data[vtxId], m_sphereCenter, m_sphereRadius);
if (isCollideSphere) //move the point to the point which intersect with sphere
{
float3 moveVector = GenerateMoveVectorSphere(m_sphereCenter, m_sphereRadius, prdPBuffer->m_Data[vtxId]);
prdPBuffer->m_Data[vtxId] += moveVector;
}
//point collide with ground
//bool isCollideGoround = CollideGround(prdPBuffer->m_Data[vtxId], m_groundHeight);
//if (isCollideGoround)
//{
// prdPBuffer->m_Data[vtxId].y = m_groundHeight;
//}
}
}
bool SolverPBD::ColliderSphere(float3 pointPos, float3 sphereOrigin, float r)
{
float d = Distance(pointPos, sphereOrigin);
if (d - r > 0.001)
{
return false;
}
else
{
return true;
}
}
bool SolverPBD::CollideGround(float3 pointPos, float groundHeight)
{
if (pointPos.y - groundHeight < 0.001)
{
return true;
}
else
{
return false;
}
}
float3 SolverPBD::GenerateMoveVectorSphere(float3 sphereOrigin, float sphereRadius, float3 p)
{
float moveDistance = sphereRadius - Distance(sphereOrigin, p);
float3 moveDirection = (p - sphereOrigin) / Distance(sphereOrigin, p);
float3 moveLength = moveDirection * moveDistance;
return moveLength;
}
// ------------------Topology---------------------
void Topology::Save(std::ofstream& ofs)
{
IO::SaveBuffer(indices, ofs);
IO::SaveBuffer(posBuffer, ofs);
IO::SaveBuffer(primList, ofs);
}
|
3fe9f6d2003f6adb2740ee9281e95d047bf12b61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
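// Note (interpretation, not authoritative): this kernel stages the per-point squared norms
// (data_dots) and per-centroid squared norms (centroid_dots) in shared memory and writes
// their pairwise sums into dots[n x k]; a k-means style caller would presumably add the
// -2 * dot(data_i, centroid_j) cross terms separately to obtain squared Euclidean distances.
// It assumes a square 2-D thread block of at most 32 x 32 threads, since the 32-entry
// shared arrays are filled via threadIdx.x but read back via threadIdx.x and threadIdx.y.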
__global__ void all_dots(int n, int k, double* data_dots, double* centroid_dots, double* dots) {
__shared__ double local_data_dots[32];
__shared__ double local_centroid_dots[32];
int data_index = threadIdx.x + blockIdx.x * blockDim.x;
if ((data_index < n) && (threadIdx.y == 0)) {
local_data_dots[threadIdx.x] = data_dots[data_index];
}
int centroid_index = threadIdx.x + blockIdx.y * blockDim.y;
if ((centroid_index < k) && (threadIdx.y == 1)) {
local_centroid_dots[threadIdx.x] = centroid_dots[centroid_index];
}
__syncthreads();
centroid_index = threadIdx.y + blockIdx.y * blockDim.y;
if ((data_index < n) && (centroid_index < k)) {
dots[data_index + centroid_index * n] = local_data_dots[threadIdx.x] +
local_centroid_dots[threadIdx.y];
}
} | 3fe9f6d2003f6adb2740ee9281e95d047bf12b61.cu | #include "includes.h"
__global__ void all_dots(int n, int k, double* data_dots, double* centroid_dots, double* dots) {
__shared__ double local_data_dots[32];
__shared__ double local_centroid_dots[32];
int data_index = threadIdx.x + blockIdx.x * blockDim.x;
if ((data_index < n) && (threadIdx.y == 0)) {
local_data_dots[threadIdx.x] = data_dots[data_index];
}
int centroid_index = threadIdx.x + blockIdx.y * blockDim.y;
if ((centroid_index < k) && (threadIdx.y == 1)) {
local_centroid_dots[threadIdx.x] = centroid_dots[centroid_index];
}
__syncthreads();
centroid_index = threadIdx.y + blockIdx.y * blockDim.y;
if ((data_index < n) && (centroid_index < k)) {
dots[data_index + centroid_index * n] = local_data_dots[threadIdx.x] +
local_centroid_dots[threadIdx.y];
}
} |
5c995d3d57a3b4726ee45b94970cd96735d13005.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*************************************************************************
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
************************************************************************/
#include <cassert>
#include "core.h"
#include "common_kernel.h"
#include "copy_kernel.h"
#include "enqueue.h"
#include "reduce_kernel.h"
/* HIERARCHY
*
* The data is split into CHUNKS, and each CHUNK is split into NUM_SUBCHUNKS
* SUBCHUNKS, where each SUBCHUNK is an independent, complete reduction. Each
* GPU has a buffer that can fit an entire CHUNK, so that all SUBCHUNKS can be
* processed without checking that the buffer on the receiving GPU is empty. A
* SUBCHUNK is split into NUM_GPUS SLICES and each GPU works on a different
 * SLICE at the same time. Before moving on to the next SLICE in the reduction
* algorithm, the GPU has to check whether it has received the data from the
* previous GPU it needs for this SLICE. To hide the latency of this
* communication, each GPU processes all the SLICES of all the SUBCHUNKS in
* sequence before moving on to the next SLICE. Each SLICE is split into a
* certain number of UNROLLS (determined by the buffer size) and each thread
* performs UNROLL_COUNT single-data-element operations inside an UNROLL. As the
* name suggests, the UNROLL_COUNT operations within an UNROLL are unrolled.
*/
// Number of threads used to perform copies, etc. Must be multiple of 32.
// An additional thread is used to handle threadfences, so the CUDA blocks
// have dimension NUM_THREADS+1.
#define NUM_THREADS 256
// Each thread unrolls the innermost loop of the copy or reduction operations
// to this many single-data-element instructions
#define UNROLL_COUNT 8
#define UNROLL_SIZE (UNROLL_COUNT * NUM_THREADS)
// To hide the latency associated with the synchronization between different
// subchunks, we interleave the independent subchunks so that more data can be
// transferred while the sync is in progress. This is the number of subchunks
// that are active at the same time
#define NUM_SUBCHUNKS 2
/*
* numGPUs BLOCKs consisting of recvcount words each
* BLOCK is split up into NumChunks CHUNKs
* CHUNK is split up into NUM_SUBCHUNKS SUBCHUNKs
* SUBCHUNK consists of exactly one SLICE
* SLICE is most efficiently processed in multiples of UNROLL_SIZE
*
* The algorithm has numGPUs steps and each step processes a SLICE (i.e.
* SUBCHUNK) of a different BLOCK. Only data of the BLOCKs not resident on the
* GPU need to be communicated, hence (numGPUs - 1) BLOCKs. So the buffer needs
* to have room for (numGPUs - 1) SLICEs.
*/
// do not encode the subchunk number into the flag, because there is a separate
// flag for each subchunk
// If this is called with STEP, it means that we just finished processing the
// data for step STEP on this GPU, which is the data required on the next GPU
// for step STEP + 1, so we signal the next GPU that its data for step STEP + 1
// is available. This is called by one particular consumer warp and so we select
// the first thread in the warp to set the flag.
#define SIGNAL_NEW_DATA_AVAILABLE(chunk, subchunk, step) \
do { \
args.NextNewDataAvailableFlag[0] = \
2*((chunk) * args.NumGPUs + (step)) + subchunk + 1; \
} while (0)
// This is called by all producer threads, but only thread 0 spins on the flag,
// all threads synchronize after thread 0 is done spinning.
#define WAIT_FOR_NEW_DATA(chunk, subchunk, step) \
do { \
if (tid == 0) { \
Wait([=] { \
return ((volatile int *)args.ThisNewDataAvailableFlag)[0] >= \
2*((chunk) * args.NumGPUs + (step)) + subchunk - 1; \
}); \
} \
BAR(sync, 1, NUM_THREADS); \
} while (0)
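// Worked example (illustration only): with 4 GPUs, chunk 0 and subchunk 0, the previous
// GPU finishes step 0 and writes 2*(0*4 + 0) + 0 + 1 = 1 into this GPU's flag, and the
// wait for step 1 spins until the flag is >= 2*(0*4 + 1) + 0 - 1 = 1, so the two formulas
// meet exactly when the data needed for the next step has arrived.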
// If this is called with CHUNK, it means that this GPU has just finished
// processing the chunk CHUNK and so the previous GPU can start with CHUNK + 1
#define SIGNAL_CHUNK_DONE(chunk, subchunk) \
do { \
args.PrevChunkDoneFlag[0] = 2*(chunk) + subchunk + 1; \
} while (0)
// This is called by all producer threads, but only thread 0 spins on the flag,
// all threads synchronize after thread 0 is done spinning.
#define WAIT_FOR_CHUNK(chunk, subchunk) \
do { \
if (tid == 0) { \
Wait([=] { \
return ((volatile int *)args.ThisChunkDoneFlag)[0] >= \
2*(chunk) + subchunk - 1; \
}); \
} \
BAR(sync, 1, NUM_THREADS); \
} while (0)
__device__ inline void getSliceSizeAndChunkSize(int *sliceSize, int slice,
int numSlices, int numBigSlices, int numSmallSlices, int bigSliceN,
int smallSliceN, int lastSliceN) {
if (slice < numBigSlices) {
*sliceSize = bigSliceN;
} else {
*sliceSize = (slice < numBigSlices + numSmallSlices) ? smallSliceN
: ((slice == numSlices - 1) ? lastSliceN : 0);
}
/* if (threadIdx.x == 0)
printf("[sliceSize=%d] slice=%d numSlices=%d "
"numBigSlices=%d numSmallSlices=%d bigSliceN=%d smallSliceN=%d "
"lastSliceN=%d\n", *sliceSize, slice, numSlices, numBigSlices,
numSmallSlices, bigSliceN, smallSliceN, lastSliceN);
*/
}
template<typename T>
struct ReduceScatterKernelArgs {
// general parameters
int ThisId;
int NumGPUs;
int N;
int * UserFromRing;
// some pre-computed sizes
int SliceSize;
int ChunkSize;
int NumChunks;
int BufferSliceStride;
int BufferMisalignedN;
// local and remote input, output, and buffer
const T * __restrict__ ThisInput;
volatile T * __restrict__ ThisOutput;
volatile T * __restrict__ ThisBuffer;
volatile T * __restrict__ NextBuffer;
// local and remote flags
volatile int * __restrict__ ThisNewDataAvailableFlag;
volatile int * __restrict__ NextNewDataAvailableFlag;
volatile int * __restrict__ ThisChunkDoneFlag;
volatile int * __restrict__ PrevChunkDoneFlag;
};
__device__ inline int GetBlock(const int index, const int step,
const int * const userFromRing, const int numGPUs) {
return userFromRing[(numGPUs + index - 1 - step) % numGPUs];
}
template<int THREADS, int UNROLL, class FUNC, typename T>
__global__ void ReduceScatterKernel(const ReduceScatterKernelArgs<T> args) {
if (args.N == 0) return;
int tid = threadIdx.x;
for (int chunk = 0; chunk < args.NumChunks; ++chunk) {
// calculate slice size. for all chunks except (possibly) the last one,
// this will just be args.SliceSize. For the last one, it may be smaller
int bigSliceN = args.SliceSize;
int smallSliceN = 0;
int lastSliceN = 0;
int numSlices = NUM_SUBCHUNKS;
int numBigSlices = numSlices;
int numSmallSlices = 0;
// last chunk
if ((chunk + 1 == args.NumChunks) && (args.N % args.ChunkSize > 0))
CalcLastChunk<THREADS, UNROLL, T>(&bigSliceN, &smallSliceN, &lastSliceN,
&numSlices, &numBigSlices, &numSmallSlices, args.N, args.NumChunks,
args.ChunkSize);
// this offset is only applied to Data pointers, not to Buffer pointers,
// since we only have one buffer per chunk
int chunkOffset = chunk * args.ChunkSize;
// step 0: push data to next GPU
int step = 0;
int block = GetBlock(args.ThisId, step, args.UserFromRing, args.NumGPUs);
int blockOffset = chunkOffset + block * args.N;
int bufferOffset = block * NUM_SUBCHUNKS * args.BufferSliceStride +
((block * args.BufferMisalignedN) % alignof(PackType));
int sliceSize;
if (tid < NUM_THREADS) {
for(int s=0; s<NUM_SUBCHUNKS; ++s) {
getSliceSizeAndChunkSize(&sliceSize, s, numSlices, numBigSlices,
numSmallSlices, bigSliceN, smallSliceN, lastSliceN);
WAIT_FOR_CHUNK(chunk, s);
Copy<UNROLL, THREADS>(
args.NextBuffer + bufferOffset,
args.ThisInput + blockOffset,
sliceSize);
__syncthreads();
bufferOffset += sliceSize;
blockOffset += sliceSize;
}
} else { // Is consumer
for(int s=0; s<NUM_SUBCHUNKS; ++s) {
__syncthreads();
SIGNAL_NEW_DATA_AVAILABLE(chunk, s, step);
}
}
// steps j with 0 < j < k - 1, where k = number of GPUs: reduce and copy to
// next GPU
for (step = 1; step < args.NumGPUs - 1; ++step) {
int block = GetBlock(args.ThisId, step, args.UserFromRing, args.NumGPUs);
int blockOffset = chunkOffset + block * args.N;
int bufferOffset = block * NUM_SUBCHUNKS * args.BufferSliceStride +
((block * args.BufferMisalignedN) % alignof(PackType));
if (tid < NUM_THREADS) {
for(int s=0; s<NUM_SUBCHUNKS; ++s) {
getSliceSizeAndChunkSize(&sliceSize, s, numSlices, numBigSlices,
numSmallSlices, bigSliceN, smallSliceN, lastSliceN);
WAIT_FOR_NEW_DATA(chunk, s, step);
Reduce<UNROLL, THREADS, FUNC>(
args.NextBuffer + bufferOffset,
args.ThisBuffer + bufferOffset,
args.ThisInput + blockOffset,
sliceSize);
__syncthreads();
bufferOffset += sliceSize;
blockOffset += sliceSize;
}
} else {
for(int s=0; s<NUM_SUBCHUNKS; ++s) {
__syncthreads();
SIGNAL_NEW_DATA_AVAILABLE(chunk, s, step);
}
}
}
// step k - 1: reduce this buffer and data, which will produce the final
// result that we store in this data and push to the next GPU
step = args.NumGPUs - 1;
block = GetBlock(args.ThisId, step, args.UserFromRing, args.NumGPUs);
blockOffset = chunkOffset + block * args.N;
bufferOffset = block * NUM_SUBCHUNKS * args.BufferSliceStride +
((block * args.BufferMisalignedN) % alignof(PackType));
if (tid < NUM_THREADS) {
int outputOffset = 0;
for (int s=0; s<NUM_SUBCHUNKS; ++s) {
getSliceSizeAndChunkSize(&sliceSize, s, numSlices, numBigSlices,
numSmallSlices, bigSliceN, smallSliceN, lastSliceN);
WAIT_FOR_NEW_DATA(chunk, s, step);
Reduce<UNROLL, THREADS, FUNC>(
args.ThisOutput + (chunkOffset + outputOffset),
args.ThisBuffer + bufferOffset,
args.ThisInput + blockOffset,
sliceSize);
__syncthreads();
outputOffset += sliceSize;
bufferOffset += sliceSize;
blockOffset += sliceSize;
}
} else {
for (int s=0; s<NUM_SUBCHUNKS; ++s) {
__syncthreads();
SIGNAL_NEW_DATA_AVAILABLE(chunk, s, step);
// signal that chunk is done if this is not the last chunk
if (chunk + 1 < args.NumChunks) {
SIGNAL_CHUNK_DONE(chunk, s);
}
}
}
}
// wait for the last data to be pushed to us
if (tid < NUM_THREADS) {
WAIT_FOR_NEW_DATA(args.NumChunks, NUM_SUBCHUNKS-1, 0);
if (tid == 0) {
args.ThisNewDataAvailableFlag[tid] = 0;
args.ThisChunkDoneFlag[tid] = 0;
}
}
}
template<class FUNC, typename T>
ncclResult_t ncclReduceScatterWithTypeAndFunc(const void* sendbuff,
void* recvbuff, const int recvcount, ncclComm* comm, hipStream_t stream) {
if (recvcount == 0) {
return ncclSuccess;
}
int index = comm->ncclId;
int blockSizeInBytes = recvcount * sizeof(T);
int misalignedBytes = blockSizeInBytes % alignof(uint64_t);
assert((int)((misalignedBytes / sizeof(T)) * sizeof(T)) == misalignedBytes);
int misalignedN = misalignedBytes / sizeof(T);
assert(misalignedN < (int)(sizeof(uint64_t) / sizeof(T)));
int paddingN = (misalignedN > 0) ? sizeof(uint64_t) / sizeof(T) : 0;
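  // Worked example (hypothetical values): for T = float and recvcount = 1001,
  // blockSizeInBytes = 4004, misalignedBytes = 4004 % 8 = 4, misalignedN = 1 and
  // paddingN = 2, i.e. each block's slices get two extra float slots in the buffer so
  // that the next block starts on an 8-byte boundary again.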
// There is one slice per GPU, so a slice can be at most bufferN / numGPUs,
// where bufferN is the number of elements of type T that fit into the buffer.
// For efficiency, we want the slice size to be a multiple of UNROLL_SIZE
int bufferN = comm->buffSize / sizeof(T);
// we only need buffer for k slices and k*k paddings (we need k paddings per
// block and we have k blocks)
int bufferNPerSlice = (bufferN - NUM_SUBCHUNKS * comm->nDev * paddingN) /
(NUM_SUBCHUNKS * comm->nDev);
int sliceSize = (bufferNPerSlice / UNROLL_SIZE) * UNROLL_SIZE;
int nextId = (index + 1) % comm->nDev;
int prevId = (index + comm->nDev - 1) % comm->nDev;
ReduceScatterKernelArgs<T> args;
args.ThisId = index;
args.NumGPUs = comm->nDev;
args.N = recvcount;
/* Block j must end up in recvbuff[j], which lives on device with logical
* index comm->ringFromUser[j]. But the block ordering does not necessarily
* follow the ring ordering. Hence the order in which a particular GPU
* processes the different blocks (the correspondence between the step in
* the reduction algorithm and the block on which a GPU operates in that
* particular step) is not the same as the ring order.
*
* Say we have 4 GPUs and comm->userFromRing = { 1, 2, 0, 3 }. Then there are 4
   * steps in the reduction algorithm and block 0 needs to end up on device 2,
* block 1 on device 0, block 2 on device 1, and block 3 needs to end up on
* device 3. In the last step of the algorithm, each GPU must be processing
* the block that will end up on that GPU. The blocks that a GPU has to
* process in the previous steps is determined by the next step because each
* GPU only hands off data to the next GPU in the ring.
*
* In the above example, we get the following table of which block is
* processed by each GPU in a given step. The columns correspond to the
* different GPUs while the rows are the steps in the algorithm.
*
* GPU 0 1 2 3
* step
* 0 3 1 2 0
* 1 0 3 1 2
* 2 2 0 3 1
* 3 1 2 0 3
*
   * We note that the rows in the above table are just comm->userFromRing in the last
   * step and the list is cyclically permuted to the left for each previous
* step. The columns, which are what the individual GPUs need to know, are
* comm->userFromRing traversed backwards and starting at index k-1 for GPU k.
* These columns are what we put into args.BlockVsStep to tell the GPU which
* block it needs to be processing at a particular step. */
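  // Illustration (hypothetical values from the comment above): with
  // userFromRing = { 1, 2, 0, 3 }, GetBlock(0, step, userFromRing, 4) returns
  // userFromRing[3] = 3, userFromRing[2] = 0, userFromRing[1] = 2 and userFromRing[0] = 1
  // for steps 0..3, which is exactly the "GPU 0" column of the table above.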
args.UserFromRing = comm->devUserFromRing;
args.SliceSize = sliceSize;
args.ChunkSize = NUM_SUBCHUNKS * args.SliceSize;
// don't reduce this if we cut the slice size in half below, because if that
// happens, the last chunk will be larger than the other chunks, and we will
// need the extra buffer space
args.BufferSliceStride = args.SliceSize + paddingN;
args.BufferMisalignedN = misalignedN;
// avoid a case where we have one or more big chunks and one tiny one
int remainder = args.N % args.ChunkSize;
if ((args.N > args.ChunkSize) && (remainder > 0) &&
(args.N < 5 * args.ChunkSize) && (2 * remainder < args.ChunkSize)) {
args.SliceSize /= 2;
args.ChunkSize = NUM_SUBCHUNKS * args.SliceSize;
// round down so we end up with a big last chunk
args.NumChunks = args.N / args.ChunkSize;
} else {
// round up
args.NumChunks = (args.N + args.ChunkSize - 1) / args.ChunkSize;
}
// printf("sliceSize = %i, chunkSize = %i, numChunks = %i, sliceStride = %i, misalignedN = %i\n", args.SliceSize, args.ChunkSize, args.NumChunks, args.BufferSliceStride, args.BufferMisalignedN);
args.ThisInput = (const T*)sendbuff;
args.ThisOutput = (volatile T*)recvbuff;
args.ThisBuffer = (volatile T*)comm->local[prevId]->buff;
args.NextBuffer = (volatile T*)comm->remote[nextId]->buff;
// we need 2 * NUM_SUBCHUNKS flags, so use the first NUM_SUBCHUNKS flags
// to signal the next GPU that new data is available and the following
// NUM_SUBCHUNKS to signal the previous GPU that a chunk is finished
args.ThisNewDataAvailableFlag = comm->local[prevId]->flags;
args.NextNewDataAvailableFlag = comm->remote[nextId]->flags;
args.ThisChunkDoneFlag = comm->local[nextId]->flags + 1;
args.PrevChunkDoneFlag = comm->remote[prevId]->flags + 1;
hipLaunchKernelGGL(( ReduceScatterKernel<NUM_THREADS, UNROLL_COUNT, FUNC, T>)
, dim3(1), dim3(NUM_THREADS + NUM_SUBCHUNKS * WARP_SIZE), 0, stream, args);
return ncclSuccess;
}
template<typename T>
ncclResult_t ncclReduceScatterWithType(const void* sendbuff, void* recvbuff,
int recvcount, ncclRedOp_t op, ncclComm* comm, hipStream_t stream) {
switch (op) {
case ncclSum:
return ncclReduceScatterWithTypeAndFunc<FuncSum<T>, T>(
sendbuff, recvbuff, recvcount, comm, stream);
case ncclProd:
return ncclReduceScatterWithTypeAndFunc<FuncProd<T>, T>(
sendbuff, recvbuff, recvcount, comm, stream);
case ncclMax:
return ncclReduceScatterWithTypeAndFunc<FuncMax<T>, T>(
sendbuff, recvbuff, recvcount, comm, stream);
case ncclMin:
return ncclReduceScatterWithTypeAndFunc<FuncMin<T>, T>(
sendbuff, recvbuff, recvcount, comm, stream);
}
return ncclInvalidOperation;
}
class ReduceScatterFunctor {
public:
ncclResult_t operator()(const void* sendbuff, void* recvbuff,
int recvcount, ncclDataType_t datatype, ncclRedOp_t op, int /*root*/,
ncclComm* comm, hipStream_t stream) {
switch (datatype) {
case ncclChar:
return ncclReduceScatterWithType<char>(sendbuff, recvbuff, recvcount,
op, comm, stream);
case ncclInt:
return ncclReduceScatterWithType<int>(sendbuff, recvbuff, recvcount,
op, comm, stream);
#ifdef CUDA_HAS_HALF
case ncclHalf:
return ncclReduceScatterWithType<half>(sendbuff, recvbuff, recvcount,
op, comm, stream);
#endif
case ncclFloat:
return ncclReduceScatterWithType<float>(sendbuff, recvbuff, recvcount,
op, comm, stream);
case ncclDouble:
return ncclReduceScatterWithType<double>(sendbuff, recvbuff, recvcount,
op, comm, stream);
}
return ncclInvalidType;
}
};
extern "C" DSOGLOBAL
ncclResult_t ncclReduceScatter(const void* sendbuff, void* recvbuff,
int recvcount, ncclDataType_t datatype, ncclRedOp_t op, ncclComm* comm,
hipStream_t stream) {
return enqueue(ReduceScatterFunctor(), sendbuff, recvbuff, recvcount,
datatype, op, 0, comm, stream);
}
| 5c995d3d57a3b4726ee45b94970cd96735d13005.cu | /*************************************************************************
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
************************************************************************/
#include <cassert>
#include "core.h"
#include "common_kernel.h"
#include "copy_kernel.h"
#include "enqueue.h"
#include "reduce_kernel.h"
/* HIERARCHY
*
* The data is split into CHUNKS, and each CHUNK is split into NUM_SUBCHUNKS
* SUBCHUNKS, where each SUBCHUNK is an independent, complete reduction. Each
* GPU has a buffer that can fit an entire CHUNK, so that all SUBCHUNKS can be
* processed without checking that the buffer on the receiving GPU is empty. A
* SUBCHUNK is split into NUM_GPUS SLICES and each GPU works on a different
 * SLICE at the same time. Before moving on to the next SLICE in the reduction
* algorithm, the GPU has to check whether it has received the data from the
* previous GPU it needs for this SLICE. To hide the latency of this
* communication, each GPU processes all the SLICES of all the SUBCHUNKS in
* sequence before moving on to the next SLICE. Each SLICE is split into a
* certain number of UNROLLS (determined by the buffer size) and each thread
* performs UNROLL_COUNT single-data-element operations inside an UNROLL. As the
* name suggests, the UNROLL_COUNT operations within an UNROLL are unrolled.
*/
// Number of threads used to perform copies, etc. Must be multiple of 32.
// An additional thread is used to handle threadfences, so the CUDA blocks
// have dimension NUM_THREADS+1.
#define NUM_THREADS 256
// Each thread unrolls the innermost loop of the copy or reduction operations
// to this many single-data-element instructions
#define UNROLL_COUNT 8
#define UNROLL_SIZE (UNROLL_COUNT * NUM_THREADS)
// To hide the latency associated with the synchronization between different
// subchunks, we interleave the independent subchunks so that more data can be
// transferred while the sync is in progress. This is the number of subchunks
// that are active at the same time
#define NUM_SUBCHUNKS 2
/*
* numGPUs BLOCKs consisting of recvcount words each
* BLOCK is split up into NumChunks CHUNKs
* CHUNK is split up into NUM_SUBCHUNKS SUBCHUNKs
* SUBCHUNK consists of exactly one SLICE
* SLICE is most efficiently processed in multiples of UNROLL_SIZE
*
* The algorithm has numGPUs steps and each step processes a SLICE (i.e.
* SUBCHUNK) of a different BLOCK. Only data of the BLOCKs not resident on the
* GPU need to be communicated, hence (numGPUs - 1) BLOCKs. So the buffer needs
* to have room for (numGPUs - 1) SLICEs.
*/
// do not encode the subchunk number into the flag, because there is a separate
// flag for each subchunk
// If this is called with STEP, it means that we just finished processing the
// data for step STEP on this GPU, which is the data required on the next GPU
// for step STEP + 1, so we signal the next GPU that its data for step STEP + 1
// is available. This is called by one particular consumer warp and so we select
// the first thread in the warp to set the flag.
#define SIGNAL_NEW_DATA_AVAILABLE(chunk, subchunk, step) \
do { \
args.NextNewDataAvailableFlag[0] = \
2*((chunk) * args.NumGPUs + (step)) + subchunk + 1; \
} while (0)
// This is called by all producer threads, but only thread 0 spins on the flag,
// all threads synchronize after thread 0 is done spinning.
#define WAIT_FOR_NEW_DATA(chunk, subchunk, step) \
do { \
if (tid == 0) { \
Wait([=] { \
return ((volatile int *)args.ThisNewDataAvailableFlag)[0] >= \
2*((chunk) * args.NumGPUs + (step)) + subchunk - 1; \
}); \
} \
BAR(sync, 1, NUM_THREADS); \
} while (0)
// If this is called with CHUNK, it means that this GPU has just finished
// processing the chunk CHUNK and so the previous GPU can start with CHUNK + 1
#define SIGNAL_CHUNK_DONE(chunk, subchunk) \
do { \
args.PrevChunkDoneFlag[0] = 2*(chunk) + subchunk + 1; \
} while (0)
// This is called by all producer threads, but only thread 0 spins on the flag,
// all threads synchronize after thread 0 is done spinning.
#define WAIT_FOR_CHUNK(chunk, subchunk) \
do { \
if (tid == 0) { \
Wait([=] { \
return ((volatile int *)args.ThisChunkDoneFlag)[0] >= \
2*(chunk) + subchunk - 1; \
}); \
} \
BAR(sync, 1, NUM_THREADS); \
} while (0)
__device__ inline void getSliceSizeAndChunkSize(int *sliceSize, int slice,
int numSlices, int numBigSlices, int numSmallSlices, int bigSliceN,
int smallSliceN, int lastSliceN) {
if (slice < numBigSlices) {
*sliceSize = bigSliceN;
} else {
*sliceSize = (slice < numBigSlices + numSmallSlices) ? smallSliceN
: ((slice == numSlices - 1) ? lastSliceN : 0);
}
/* if (threadIdx.x == 0)
printf("[sliceSize=%d] slice=%d numSlices=%d "
"numBigSlices=%d numSmallSlices=%d bigSliceN=%d smallSliceN=%d "
"lastSliceN=%d\n", *sliceSize, slice, numSlices, numBigSlices,
numSmallSlices, bigSliceN, smallSliceN, lastSliceN);
*/
}
template<typename T>
struct ReduceScatterKernelArgs {
// general parameters
int ThisId;
int NumGPUs;
int N;
int * UserFromRing;
// some pre-computed sizes
int SliceSize;
int ChunkSize;
int NumChunks;
int BufferSliceStride;
int BufferMisalignedN;
// local and remote input, output, and buffer
const T * __restrict__ ThisInput;
volatile T * __restrict__ ThisOutput;
volatile T * __restrict__ ThisBuffer;
volatile T * __restrict__ NextBuffer;
// local and remote flags
volatile int * __restrict__ ThisNewDataAvailableFlag;
volatile int * __restrict__ NextNewDataAvailableFlag;
volatile int * __restrict__ ThisChunkDoneFlag;
volatile int * __restrict__ PrevChunkDoneFlag;
};
__device__ inline int GetBlock(const int index, const int step,
const int * const userFromRing, const int numGPUs) {
return userFromRing[(numGPUs + index - 1 - step) % numGPUs];
}
template<int THREADS, int UNROLL, class FUNC, typename T>
__global__ void ReduceScatterKernel(const ReduceScatterKernelArgs<T> args) {
if (args.N == 0) return;
int tid = threadIdx.x;
for (int chunk = 0; chunk < args.NumChunks; ++chunk) {
// calculate slice size. for all chunks except (possibly) the last one,
// this will just be args.SliceSize. For the last one, it may be smaller
int bigSliceN = args.SliceSize;
int smallSliceN = 0;
int lastSliceN = 0;
int numSlices = NUM_SUBCHUNKS;
int numBigSlices = numSlices;
int numSmallSlices = 0;
// last chunk
if ((chunk + 1 == args.NumChunks) && (args.N % args.ChunkSize > 0))
CalcLastChunk<THREADS, UNROLL, T>(&bigSliceN, &smallSliceN, &lastSliceN,
&numSlices, &numBigSlices, &numSmallSlices, args.N, args.NumChunks,
args.ChunkSize);
// this offset is only applied to Data pointers, not to Buffer pointers,
// since we only have one buffer per chunk
int chunkOffset = chunk * args.ChunkSize;
// step 0: push data to next GPU
int step = 0;
int block = GetBlock(args.ThisId, step, args.UserFromRing, args.NumGPUs);
int blockOffset = chunkOffset + block * args.N;
int bufferOffset = block * NUM_SUBCHUNKS * args.BufferSliceStride +
((block * args.BufferMisalignedN) % alignof(PackType));
int sliceSize;
if (tid < NUM_THREADS) {
for(int s=0; s<NUM_SUBCHUNKS; ++s) {
getSliceSizeAndChunkSize(&sliceSize, s, numSlices, numBigSlices,
numSmallSlices, bigSliceN, smallSliceN, lastSliceN);
WAIT_FOR_CHUNK(chunk, s);
Copy<UNROLL, THREADS>(
args.NextBuffer + bufferOffset,
args.ThisInput + blockOffset,
sliceSize);
__syncthreads();
bufferOffset += sliceSize;
blockOffset += sliceSize;
}
} else { // Is consumer
for(int s=0; s<NUM_SUBCHUNKS; ++s) {
__syncthreads();
SIGNAL_NEW_DATA_AVAILABLE(chunk, s, step);
}
}
// steps j with 0 < j < k - 1, where k = number of GPUs: reduce and copy to
// next GPU
for (step = 1; step < args.NumGPUs - 1; ++step) {
int block = GetBlock(args.ThisId, step, args.UserFromRing, args.NumGPUs);
int blockOffset = chunkOffset + block * args.N;
int bufferOffset = block * NUM_SUBCHUNKS * args.BufferSliceStride +
((block * args.BufferMisalignedN) % alignof(PackType));
if (tid < NUM_THREADS) {
for(int s=0; s<NUM_SUBCHUNKS; ++s) {
getSliceSizeAndChunkSize(&sliceSize, s, numSlices, numBigSlices,
numSmallSlices, bigSliceN, smallSliceN, lastSliceN);
WAIT_FOR_NEW_DATA(chunk, s, step);
Reduce<UNROLL, THREADS, FUNC>(
args.NextBuffer + bufferOffset,
args.ThisBuffer + bufferOffset,
args.ThisInput + blockOffset,
sliceSize);
__syncthreads();
bufferOffset += sliceSize;
blockOffset += sliceSize;
}
} else {
for(int s=0; s<NUM_SUBCHUNKS; ++s) {
__syncthreads();
SIGNAL_NEW_DATA_AVAILABLE(chunk, s, step);
}
}
}
// step k - 1: reduce this buffer and data, which will produce the final
// result that we store in this data and push to the next GPU
step = args.NumGPUs - 1;
block = GetBlock(args.ThisId, step, args.UserFromRing, args.NumGPUs);
blockOffset = chunkOffset + block * args.N;
bufferOffset = block * NUM_SUBCHUNKS * args.BufferSliceStride +
((block * args.BufferMisalignedN) % alignof(PackType));
if (tid < NUM_THREADS) {
int outputOffset = 0;
for (int s=0; s<NUM_SUBCHUNKS; ++s) {
getSliceSizeAndChunkSize(&sliceSize, s, numSlices, numBigSlices,
numSmallSlices, bigSliceN, smallSliceN, lastSliceN);
WAIT_FOR_NEW_DATA(chunk, s, step);
Reduce<UNROLL, THREADS, FUNC>(
args.ThisOutput + (chunkOffset + outputOffset),
args.ThisBuffer + bufferOffset,
args.ThisInput + blockOffset,
sliceSize);
__syncthreads();
outputOffset += sliceSize;
bufferOffset += sliceSize;
blockOffset += sliceSize;
}
} else {
for (int s=0; s<NUM_SUBCHUNKS; ++s) {
__syncthreads();
SIGNAL_NEW_DATA_AVAILABLE(chunk, s, step);
// signal that chunk is done if this is not the last chunk
if (chunk + 1 < args.NumChunks) {
SIGNAL_CHUNK_DONE(chunk, s);
}
}
}
}
// wait for the last data to be pushed to us
if (tid < NUM_THREADS) {
WAIT_FOR_NEW_DATA(args.NumChunks, NUM_SUBCHUNKS-1, 0);
if (tid == 0) {
args.ThisNewDataAvailableFlag[tid] = 0;
args.ThisChunkDoneFlag[tid] = 0;
}
}
}
template<class FUNC, typename T>
ncclResult_t ncclReduceScatterWithTypeAndFunc(const void* sendbuff,
void* recvbuff, const int recvcount, ncclComm* comm, cudaStream_t stream) {
if (recvcount == 0) {
return ncclSuccess;
}
int index = comm->ncclId;
int blockSizeInBytes = recvcount * sizeof(T);
int misalignedBytes = blockSizeInBytes % alignof(uint64_t);
assert((int)((misalignedBytes / sizeof(T)) * sizeof(T)) == misalignedBytes);
int misalignedN = misalignedBytes / sizeof(T);
assert(misalignedN < (int)(sizeof(uint64_t) / sizeof(T)));
int paddingN = (misalignedN > 0) ? sizeof(uint64_t) / sizeof(T) : 0;
// There is one slice per GPU, so a slice can be at most bufferN / numGPUs,
// where bufferN is the number of elements of type T that fit into the buffer.
// For efficiency, we want the slice size to be a multiple of UNROLL_SIZE
int bufferN = comm->buffSize / sizeof(T);
// we only need buffer for k slices and k*k paddings (we need k paddings per
// block and we have k blocks)
int bufferNPerSlice = (bufferN - NUM_SUBCHUNKS * comm->nDev * paddingN) /
(NUM_SUBCHUNKS * comm->nDev);
int sliceSize = (bufferNPerSlice / UNROLL_SIZE) * UNROLL_SIZE;
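  // Worked example (hypothetical values): with an 8 MB buffer of floats
  // (bufferN = 2097152), nDev = 4, NUM_SUBCHUNKS = 2 and paddingN = 2,
  // bufferNPerSlice = (2097152 - 16) / 8 = 262142 and sliceSize = (262142 / 2048) * 2048
  // = 260096, i.e. the largest multiple of UNROLL_SIZE (8 * 256 = 2048) that still lets
  // every GPU keep one slice per subchunk resident in the buffer.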
int nextId = (index + 1) % comm->nDev;
int prevId = (index + comm->nDev - 1) % comm->nDev;
ReduceScatterKernelArgs<T> args;
args.ThisId = index;
args.NumGPUs = comm->nDev;
args.N = recvcount;
/* Block j must end up in recvbuff[j], which lives on device with logical
* index comm->ringFromUser[j]. But the block ordering does not necessarily
* follow the ring ordering. Hence the order in which a particular GPU
* processes the different blocks (the correspondence between the step in
* the reduction algorithm and the block on which a GPU operates in that
* particular step) is not the same as the ring order.
*
* Say we have 4 GPUs and comm->userFromRing = { 1, 2, 0, 3 }. Then there are 4
   * steps in the reduction algorithm and block 0 needs to end up on device 2,
* block 1 on device 0, block 2 on device 1, and block 3 needs to end up on
* device 3. In the last step of the algorithm, each GPU must be processing
* the block that will end up on that GPU. The blocks that a GPU has to
* process in the previous steps is determined by the next step because each
* GPU only hands off data to the next GPU in the ring.
*
* In the above example, we get the following table of which block is
* processed by each GPU in a given step. The columns correspond to the
* different GPUs while the rows are the steps in the algorithm.
*
* GPU 0 1 2 3
* step
* 0 3 1 2 0
* 1 0 3 1 2
* 2 2 0 3 1
* 3 1 2 0 3
*
   * We note that the rows in the above table are just comm->userFromRing in the last
   * step and the list is cyclically permuted to the left for each previous
* step. The columns, which are what the individual GPUs need to know, are
* comm->userFromRing traversed backwards and starting at index k-1 for GPU k.
* These columns are what we put into args.BlockVsStep to tell the GPU which
* block it needs to be processing at a particular step. */
args.UserFromRing = comm->devUserFromRing;
args.SliceSize = sliceSize;
args.ChunkSize = NUM_SUBCHUNKS * args.SliceSize;
// don't reduce this if we cut the slice size in half below, because if that
// happens, the last chunk will be larger than the other chunks, and we will
// need the extra buffer space
args.BufferSliceStride = args.SliceSize + paddingN;
args.BufferMisalignedN = misalignedN;
// avoid a case where we have one or more big chunks and one tiny one
int remainder = args.N % args.ChunkSize;
if ((args.N > args.ChunkSize) && (remainder > 0) &&
(args.N < 5 * args.ChunkSize) && (2 * remainder < args.ChunkSize)) {
args.SliceSize /= 2;
args.ChunkSize = NUM_SUBCHUNKS * args.SliceSize;
// round down so we end up with a big last chunk
args.NumChunks = args.N / args.ChunkSize;
} else {
// round up
args.NumChunks = (args.N + args.ChunkSize - 1) / args.ChunkSize;
}
// printf("sliceSize = %i, chunkSize = %i, numChunks = %i, sliceStride = %i, misalignedN = %i\n", args.SliceSize, args.ChunkSize, args.NumChunks, args.BufferSliceStride, args.BufferMisalignedN);
args.ThisInput = (const T*)sendbuff;
args.ThisOutput = (volatile T*)recvbuff;
args.ThisBuffer = (volatile T*)comm->local[prevId]->buff;
args.NextBuffer = (volatile T*)comm->remote[nextId]->buff;
// we need 2 * NUM_SUBCHUNKS flags, so use the first NUM_SUBCHUNKS flags
// to signal the next GPU that new data is available and the following
// NUM_SUBCHUNKS to signal the previous GPU that a chunk is finished
args.ThisNewDataAvailableFlag = comm->local[prevId]->flags;
args.NextNewDataAvailableFlag = comm->remote[nextId]->flags;
args.ThisChunkDoneFlag = comm->local[nextId]->flags + 1;
args.PrevChunkDoneFlag = comm->remote[prevId]->flags + 1;
ReduceScatterKernel<NUM_THREADS, UNROLL_COUNT, FUNC, T>
<<<1, NUM_THREADS + NUM_SUBCHUNKS * WARP_SIZE, 0, stream>>>(args);
return ncclSuccess;
}
template<typename T>
ncclResult_t ncclReduceScatterWithType(const void* sendbuff, void* recvbuff,
int recvcount, ncclRedOp_t op, ncclComm* comm, cudaStream_t stream) {
switch (op) {
case ncclSum:
return ncclReduceScatterWithTypeAndFunc<FuncSum<T>, T>(
sendbuff, recvbuff, recvcount, comm, stream);
case ncclProd:
return ncclReduceScatterWithTypeAndFunc<FuncProd<T>, T>(
sendbuff, recvbuff, recvcount, comm, stream);
case ncclMax:
return ncclReduceScatterWithTypeAndFunc<FuncMax<T>, T>(
sendbuff, recvbuff, recvcount, comm, stream);
case ncclMin:
return ncclReduceScatterWithTypeAndFunc<FuncMin<T>, T>(
sendbuff, recvbuff, recvcount, comm, stream);
}
return ncclInvalidOperation;
}
class ReduceScatterFunctor {
public:
ncclResult_t operator()(const void* sendbuff, void* recvbuff,
int recvcount, ncclDataType_t datatype, ncclRedOp_t op, int /*root*/,
ncclComm* comm, cudaStream_t stream) {
switch (datatype) {
case ncclChar:
return ncclReduceScatterWithType<char>(sendbuff, recvbuff, recvcount,
op, comm, stream);
case ncclInt:
return ncclReduceScatterWithType<int>(sendbuff, recvbuff, recvcount,
op, comm, stream);
#ifdef CUDA_HAS_HALF
case ncclHalf:
return ncclReduceScatterWithType<half>(sendbuff, recvbuff, recvcount,
op, comm, stream);
#endif
case ncclFloat:
return ncclReduceScatterWithType<float>(sendbuff, recvbuff, recvcount,
op, comm, stream);
case ncclDouble:
return ncclReduceScatterWithType<double>(sendbuff, recvbuff, recvcount,
op, comm, stream);
}
return ncclInvalidType;
}
};
extern "C" DSOGLOBAL
ncclResult_t ncclReduceScatter(const void* sendbuff, void* recvbuff,
int recvcount, ncclDataType_t datatype, ncclRedOp_t op, ncclComm* comm,
cudaStream_t stream) {
return enqueue(ReduceScatterFunctor(), sendbuff, recvbuff, recvcount,
datatype, op, 0, comm, stream);
}
|
498ba0b0ebdef17f73868fa336437b50d7ba05a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "disparity_bilateral_filter.hpp"
#include <ftl/cuda_common.hpp>
#include <ftl/cuda/weighting.hpp>
using namespace cv::cuda::device;
using namespace cv::cuda;
using namespace cv;
#define WARP_SIZE 32
#define FULL_MASK 0xFFFFFFFFu
#define PIXELS_PER_LOOP 16
namespace ftl { namespace cuda { namespace device
{
namespace disp_bilateral_filter
{
template <typename C>
__device__ inline uchar distance(C a, C b);
template <>
__device__ inline uchar distance(uchar4 a, uchar4 b) {
uchar x = ::abs(a.x - b.x);
uchar y = ::abs(a.y - b.y);
uchar z = ::abs(a.z - b.z);
return (::max(::max(x, y), z));
/*union {
unsigned int v;
uchar d[4];
};
v = __vabsdiffs4(*(unsigned int*)&a, *(unsigned int*)&b);
return (::max(::max(d[0], d[1]), d[2]));*/
}
template <>
__device__ inline uchar distance(uchar3 a, uchar3 b) {
uchar x = ::abs(a.x - b.x);
uchar y = ::abs(a.y - b.y);
uchar z = ::abs(a.z - b.z);
return (::max(::max(x, y), z));
}
template <>
__device__ inline uchar distance(uchar a, uchar b) {
return abs(int(a)-int(b));
}
/*template <int channels>
struct DistRgbMax
{
static __device__ __forceinline__ uchar calc(const uchar* a, const uchar* b)
{
// TODO: (Nick) Is this the best way to read for performance?
uchar x = ::abs(a[0] - b[0]);
uchar y = ::abs(a[1] - b[1]);
uchar z = ::abs(a[2] - b[2]);
return (::max(::max(x, y), z));
}
};
template <>
struct DistRgbMax<4>
{
static __device__ __forceinline__ uchar calc(const uchar* a, const uchar* b)
{
const uchar4 aa = *(uchar4*)a;
const uchar4 bb = *(uchar4*)b;
uchar x = ::abs(aa.x - bb.x);
uchar y = ::abs(aa.y - bb.y);
uchar z = ::abs(aa.z - bb.z);
return (::max(::max(x, y), z));
}
};
template <>
struct DistRgbMax<1>
{
static __device__ __forceinline__ uchar calc(const uchar* a, const uchar* b)
{
return ::abs(a[0] - b[0]);
}
};*/
__device__ inline float calc_colour_weight(int d) {
return exp(-float(d * d) / (2.0f * 10.0f * 10.0f));
}
template <typename T>
__device__ inline T Abs(T v) { return ::abs(v); }
template <>
__device__ inline float Abs<float>(float v) { return fabsf(v); }
template <typename C, int CRADIUS, typename T>
__global__ void disp_bilateral_filter(int t, const T* __restrict__ disp, T* __restrict__ dispout, size_t disp_step,
const C* __restrict__ img, size_t img_step, int h, int w,
const float* __restrict__ ctable_color,
T cedge_disc, T cmax_disc)
{
__shared__ float s_space[(CRADIUS+1)*(CRADIUS+1)];
__shared__ short2 s_queue[4096]; // Depends on pixels per block
__shared__ int s_counter;
// Create gaussian lookup for spatial weighting
for (int i=threadIdx.x+threadIdx.y*blockDim.x; i<(CRADIUS+1)*(CRADIUS+1); ++i) {
const int y = i / (CRADIUS+1);
const int x = i % (CRADIUS+1);
s_space[i] = exp(-sqrt(float(y * y) + float(x * x)) / float(CRADIUS+1));
}
if (threadIdx.x == 0 && threadIdx.y == 0) s_counter = 0;
__syncthreads();
// Check all pixels to see if they need processing
for (STRIDE_Y(y, h)) {
for (STRIDE_X(x, w)) {
bool todo_pixel = false;
if (y >= CRADIUS && y < h - CRADIUS && x >= CRADIUS && x < w - CRADIUS) {
T dp[5];
dp[0] = *(disp + (y ) * disp_step + x + 0);
dp[1] = *(disp + (y-1) * disp_step + x + 0);
dp[2] = *(disp + (y ) * disp_step + x - 1);
dp[3] = *(disp + (y+1) * disp_step + x + 0);
dp[4] = *(disp + (y ) * disp_step + x + 1);
*(dispout + y * disp_step + x) = dp[0];
todo_pixel = (Abs(dp[1] - dp[0]) >= cedge_disc || Abs(dp[2] - dp[0]) >= cedge_disc || Abs(dp[3] - dp[0]) >= cedge_disc || Abs(dp[4] - dp[0]) >= cedge_disc);
}
			// Count valid pixels in the warp and allocate queue space for them
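			// __ballot_sync gathers a warp-wide bitmask of lanes that found work;
			// lane 0 reserves a contiguous slice of s_queue with one atomicAdd, the
			// base index is broadcast back with __shfl_sync, and each active lane
			// derives its own slot from the population count of the mask.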
const uint bal = __ballot_sync(0xFFFFFFFF, todo_pixel);
int index = 0;
if (threadIdx.x%32 == 0) {
index = atomicAdd(&s_counter, __popc(bal));
}
index = __shfl_sync(0xFFFFFFFF, index, 0, 32);
index += __popc(bal >> (threadIdx.x%32)) - 1;
if (todo_pixel) s_queue[index] = make_short2(x,y);
}
}
// Switch to processing mode
__syncthreads();
const int counter = s_counter;
// Stride the queue to reduce bank conflicts
// Each thread takes a pixel that needs processing
for (int ix=(threadIdx.x + threadIdx.y*blockDim.x); ix<counter; ix+=(blockDim.x*blockDim.y)) {
const short2 pt = s_queue[ix];
const int x = pt.x;
const int y = pt.y;
T dp[5];
dp[0] = *(disp + (y ) * disp_step + x + 0);
dp[1] = *(disp + (y-1) * disp_step + x + 0);
dp[2] = *(disp + (y ) * disp_step + x - 1);
dp[3] = *(disp + (y+1) * disp_step + x + 0);
dp[4] = *(disp + (y ) * disp_step + x + 1);
float cost[] = {0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
const C ic = *(img + y * img_step + x);
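		// The window loops below accumulate, for each of the five candidate
		// disparities dp[0..4] (centre pixel and its four neighbours), a cost that
		// weights the truncated disparity difference by colour similarity
		// (ctable_color) and spatial distance (s_space); the cheapest candidate is
		// written to dispout at the end.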
//#pragma unroll
// Note: Don't unroll this one!
for(int yi = -CRADIUS; yi <= CRADIUS; ++yi)
{
const T* disp_y = disp + (y + yi) * disp_step;
#pragma unroll
for(int xi = -CRADIUS; xi <= CRADIUS; ++xi) {
const C in = *(img + (y+yi) * img_step + (xi+x));
uchar dist_rgb = distance(ic,in);
// The bilateral part of the filter
const float weight = ctable_color[dist_rgb] * s_space[::abs(yi)*(CRADIUS+1) + ::abs(xi)];
const T disp_reg = disp_y[x+xi];
// The "joint" part checking for depth similarity
cost[0] += ::min(cmax_disc, Abs(disp_reg - dp[0])) * weight;
cost[1] += ::min(cmax_disc, Abs(disp_reg - dp[1])) * weight;
cost[2] += ::min(cmax_disc, Abs(disp_reg - dp[2])) * weight;
cost[3] += ::min(cmax_disc, Abs(disp_reg - dp[3])) * weight;
cost[4] += ::min(cmax_disc, Abs(disp_reg - dp[4])) * weight;
}
}
float minimum = cost[0];
int id = 0;
if (cost[1] < minimum)
{
minimum = cost[1];
id = 1;
}
if (cost[2] < minimum)
{
minimum = cost[2];
id = 2;
}
if (cost[3] < minimum)
{
minimum = cost[3];
id = 3;
}
if (cost[4] < minimum)
{
minimum = cost[4];
id = 4;
}
*(dispout + y * disp_step + x) = dp[id];
}
}
template <typename T, typename C>
void disp_bilateral_filter(cv::cuda::PtrStepSz<T> disp, cv::cuda::PtrStepSz<T> dispout, cv::cuda::PtrStepSz<C> img, int iters, const float *table_color, size_t table_step, int radius, T edge_disc, T max_disc, hipStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = (disp.cols + 64 - 1) / 64; // 64*64 = 4096, max pixels in block
grid.y = (disp.rows + 64 - 1) / 64;
T *in_ptr = disp.data;
T *out_ptr = dispout.data;
            // Iters must be odd so the final result ends up in dispout after the
            // in_ptr/out_ptr swaps below.
            if ((iters & 0x1) == 0) iters += 1;
switch (radius) {
case 1 :
for (int i = 0; i < iters; ++i) {
hipLaunchKernelGGL(( disp_bilateral_filter<C,1>), dim3(grid), dim3(threads), 0, stream, 0, in_ptr, out_ptr, disp.step/sizeof(T), (C*)img.data, img.step/sizeof(C), disp.rows, disp.cols, table_color, edge_disc, max_disc);
cudaSafeCall( hipGetLastError() );
std::swap(in_ptr, out_ptr);
} break;
case 2 :
for (int i = 0; i < iters; ++i) {
hipLaunchKernelGGL(( disp_bilateral_filter<C,2>), dim3(grid), dim3(threads), 0, stream, 0, in_ptr, out_ptr, disp.step/sizeof(T), (C*)img.data, img.step/sizeof(C), disp.rows, disp.cols, table_color, edge_disc, max_disc);
cudaSafeCall( hipGetLastError() );
std::swap(in_ptr, out_ptr);
} break;
case 3 :
for (int i = 0; i < iters; ++i) {
hipLaunchKernelGGL(( disp_bilateral_filter<C,3>), dim3(grid), dim3(threads), 0, stream, 0, in_ptr, out_ptr, disp.step/sizeof(T), (C*)img.data, img.step/sizeof(C), disp.rows, disp.cols, table_color, edge_disc, max_disc);
cudaSafeCall( hipGetLastError() );
std::swap(in_ptr, out_ptr);
} break;
case 4 :
for (int i = 0; i < iters; ++i) {
hipLaunchKernelGGL(( disp_bilateral_filter<C,4>), dim3(grid), dim3(threads), 0, stream, 0, in_ptr, out_ptr, disp.step/sizeof(T), (C*)img.data, img.step/sizeof(C), disp.rows, disp.cols, table_color, edge_disc, max_disc);
cudaSafeCall( hipGetLastError() );
std::swap(in_ptr, out_ptr);
} break;
case 5 :
for (int i = 0; i < iters; ++i) {
hipLaunchKernelGGL(( disp_bilateral_filter<C,5>), dim3(grid), dim3(threads), 0, stream, 0, in_ptr, out_ptr, disp.step/sizeof(T), (C*)img.data, img.step/sizeof(C), disp.rows, disp.cols, table_color, edge_disc, max_disc);
cudaSafeCall( hipGetLastError() );
std::swap(in_ptr, out_ptr);
} break;
case 6 :
for (int i = 0; i < iters; ++i) {
hipLaunchKernelGGL(( disp_bilateral_filter<C,6>), dim3(grid), dim3(threads), 0, stream, 0, in_ptr, out_ptr, disp.step/sizeof(T), (C*)img.data, img.step/sizeof(C), disp.rows, disp.cols, table_color, edge_disc, max_disc);
cudaSafeCall( hipGetLastError() );
std::swap(in_ptr, out_ptr);
} break;
case 7 :
for (int i = 0; i < iters; ++i) {
hipLaunchKernelGGL(( disp_bilateral_filter<C,7>), dim3(grid), dim3(threads), 0, stream, 0, in_ptr, out_ptr, disp.step/sizeof(T), (C*)img.data, img.step/sizeof(C), disp.rows, disp.cols, table_color, edge_disc, max_disc);
cudaSafeCall( hipGetLastError() );
std::swap(in_ptr, out_ptr);
} break;
default:
CV_Error(cv::Error::BadTileSize, "Unsupported kernel radius");
}
//if (stream == 0)
// cudaSafeCall( hipDeviceSynchronize() );
}
        // These are commented out since we don't use them and they slow down compilation
//template void disp_bilateral_filter<uchar,uchar>(cv::cuda::PtrStepSz<uchar> disp, cv::cuda::PtrStepSz<uchar> dispout, cv::cuda::PtrStepSz<uchar> img, int iters, const float *table_color, size_t table_step, int radius, uchar, uchar, hipStream_t stream);
//template void disp_bilateral_filter<short,uchar>(cv::cuda::PtrStepSz<short> disp, cv::cuda::PtrStepSz<short> dispout, cv::cuda::PtrStepSz<uchar> img, int iters, const float *table_color, size_t table_step, int radius, short, short, hipStream_t stream);
//template void disp_bilateral_filter<float,uchar>(cv::cuda::PtrStepSz<float> disp, cv::cuda::PtrStepSz<float> dispout, cv::cuda::PtrStepSz<uchar> img, int iters, const float *table_color, size_t table_step, int radius, float, float, hipStream_t stream);
//template void disp_bilateral_filter<uchar,uchar3>(cv::cuda::PtrStepSz<uchar> disp, cv::cuda::PtrStepSz<uchar> dispout, cv::cuda::PtrStepSz<uchar3> img, int iters, const float *table_color, size_t table_step, int radius, uchar, uchar, hipStream_t stream);
//template void disp_bilateral_filter<short,uchar3>(cv::cuda::PtrStepSz<short> disp, cv::cuda::PtrStepSz<short> dispout, cv::cuda::PtrStepSz<uchar3> img, int iters, const float *table_color, size_t table_step, int radius, short, short, hipStream_t stream);
//template void disp_bilateral_filter<float,uchar3>(cv::cuda::PtrStepSz<float> disp, cv::cuda::PtrStepSz<float> dispout, cv::cuda::PtrStepSz<uchar3> img, int iters, const float *table_color, size_t table_step, int radius, float, float, hipStream_t stream);
template void disp_bilateral_filter<uchar,uchar4>(cv::cuda::PtrStepSz<uchar> disp, cv::cuda::PtrStepSz<uchar> dispout, cv::cuda::PtrStepSz<uchar4> img, int iters, const float *table_color, size_t table_step, int radius, uchar, uchar, hipStream_t stream);
template void disp_bilateral_filter<short,uchar4>(cv::cuda::PtrStepSz<short> disp, cv::cuda::PtrStepSz<short> dispout, cv::cuda::PtrStepSz<uchar4> img, int iters, const float *table_color, size_t table_step, int radius, short, short, hipStream_t stream);
template void disp_bilateral_filter<float,uchar4>(cv::cuda::PtrStepSz<float> disp, cv::cuda::PtrStepSz<float> dispout, cv::cuda::PtrStepSz<uchar4> img, int iters, const float *table_color, size_t table_step, int radius, float, float, hipStream_t stream);
        } // namespace disp_bilateral_filter
}}} // namespace ftl { namespace cuda { namespace device
#endif /* CUDA_DISABLER */
| 498ba0b0ebdef17f73868fa336437b50d7ba05a1.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "disparity_bilateral_filter.hpp"
#include <ftl/cuda_common.hpp>
#include <ftl/cuda/weighting.hpp>
using namespace cv::cuda::device;
using namespace cv::cuda;
using namespace cv;
#define WARP_SIZE 32
#define FULL_MASK 0xFFFFFFFFu
#define PIXELS_PER_LOOP 16
namespace ftl { namespace cuda { namespace device
{
namespace disp_bilateral_filter
{
template <typename C>
__device__ inline uchar distance(C a, C b);
template <>
__device__ inline uchar distance(uchar4 a, uchar4 b) {
uchar x = ::abs(a.x - b.x);
uchar y = ::abs(a.y - b.y);
uchar z = ::abs(a.z - b.z);
return (::max(::max(x, y), z));
/*union {
unsigned int v;
uchar d[4];
};
v = __vabsdiffs4(*(unsigned int*)&a, *(unsigned int*)&b);
return (::max(::max(d[0], d[1]), d[2]));*/
}
template <>
__device__ inline uchar distance(uchar3 a, uchar3 b) {
uchar x = ::abs(a.x - b.x);
uchar y = ::abs(a.y - b.y);
uchar z = ::abs(a.z - b.z);
return (::max(::max(x, y), z));
}
template <>
__device__ inline uchar distance(uchar a, uchar b) {
return abs(int(a)-int(b));
}
/*template <int channels>
struct DistRgbMax
{
static __device__ __forceinline__ uchar calc(const uchar* a, const uchar* b)
{
// TODO: (Nick) Is this the best way to read for performance?
uchar x = ::abs(a[0] - b[0]);
uchar y = ::abs(a[1] - b[1]);
uchar z = ::abs(a[2] - b[2]);
return (::max(::max(x, y), z));
}
};
template <>
struct DistRgbMax<4>
{
static __device__ __forceinline__ uchar calc(const uchar* a, const uchar* b)
{
const uchar4 aa = *(uchar4*)a;
const uchar4 bb = *(uchar4*)b;
uchar x = ::abs(aa.x - bb.x);
uchar y = ::abs(aa.y - bb.y);
uchar z = ::abs(aa.z - bb.z);
return (::max(::max(x, y), z));
}
};
template <>
struct DistRgbMax<1>
{
static __device__ __forceinline__ uchar calc(const uchar* a, const uchar* b)
{
return ::abs(a[0] - b[0]);
}
};*/
__device__ inline float calc_colour_weight(int d) {
return exp(-float(d * d) / (2.0f * 10.0f * 10.0f));
}
template <typename T>
__device__ inline T Abs(T v) { return ::abs(v); }
template <>
__device__ inline float Abs<float>(float v) { return fabsf(v); }
template <typename C, int CRADIUS, typename T>
__global__ void disp_bilateral_filter(int t, const T* __restrict__ disp, T* __restrict__ dispout, size_t disp_step,
const C* __restrict__ img, size_t img_step, int h, int w,
const float* __restrict__ ctable_color,
T cedge_disc, T cmax_disc)
{
__shared__ float s_space[(CRADIUS+1)*(CRADIUS+1)];
__shared__ short2 s_queue[4096]; // Depends on pixels per block
__shared__ int s_counter;
// Create gaussian lookup for spatial weighting
for (int i=threadIdx.x+threadIdx.y*blockDim.x; i<(CRADIUS+1)*(CRADIUS+1); ++i) {
const int y = i / (CRADIUS+1);
const int x = i % (CRADIUS+1);
s_space[i] = exp(-sqrt(float(y * y) + float(x * x)) / float(CRADIUS+1));
}
if (threadIdx.x == 0 && threadIdx.y == 0) s_counter = 0;
__syncthreads();
// Check all pixels to see if they need processing
for (STRIDE_Y(y, h)) {
for (STRIDE_X(x, w)) {
bool todo_pixel = false;
if (y >= CRADIUS && y < h - CRADIUS && x >= CRADIUS && x < w - CRADIUS) {
T dp[5];
dp[0] = *(disp + (y ) * disp_step + x + 0);
dp[1] = *(disp + (y-1) * disp_step + x + 0);
dp[2] = *(disp + (y ) * disp_step + x - 1);
dp[3] = *(disp + (y+1) * disp_step + x + 0);
dp[4] = *(disp + (y ) * disp_step + x + 1);
*(dispout + y * disp_step + x) = dp[0];
todo_pixel = (Abs(dp[1] - dp[0]) >= cedge_disc || Abs(dp[2] - dp[0]) >= cedge_disc || Abs(dp[3] - dp[0]) >= cedge_disc || Abs(dp[4] - dp[0]) >= cedge_disc);
}
			// Count valid pixels in the warp and allocate queue space for them
const uint bal = __ballot_sync(0xFFFFFFFF, todo_pixel);
int index = 0;
if (threadIdx.x%32 == 0) {
index = atomicAdd(&s_counter, __popc(bal));
}
index = __shfl_sync(0xFFFFFFFF, index, 0, 32);
index += __popc(bal >> (threadIdx.x%32)) - 1;
if (todo_pixel) s_queue[index] = make_short2(x,y);
}
}
// Switch to processing mode
__syncthreads();
const int counter = s_counter;
// Stride the queue to reduce bank conflicts
// Each thread takes a pixel that needs processing
for (int ix=(threadIdx.x + threadIdx.y*blockDim.x); ix<counter; ix+=(blockDim.x*blockDim.y)) {
const short2 pt = s_queue[ix];
const int x = pt.x;
const int y = pt.y;
T dp[5];
dp[0] = *(disp + (y ) * disp_step + x + 0);
dp[1] = *(disp + (y-1) * disp_step + x + 0);
dp[2] = *(disp + (y ) * disp_step + x - 1);
dp[3] = *(disp + (y+1) * disp_step + x + 0);
dp[4] = *(disp + (y ) * disp_step + x + 1);
float cost[] = {0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
const C ic = *(img + y * img_step + x);
//#pragma unroll
// Note: Don't unroll this one!
for(int yi = -CRADIUS; yi <= CRADIUS; ++yi)
{
const T* disp_y = disp + (y + yi) * disp_step;
#pragma unroll
for(int xi = -CRADIUS; xi <= CRADIUS; ++xi) {
const C in = *(img + (y+yi) * img_step + (xi+x));
uchar dist_rgb = distance(ic,in);
// The bilateral part of the filter
const float weight = ctable_color[dist_rgb] * s_space[::abs(yi)*(CRADIUS+1) + ::abs(xi)];
const T disp_reg = disp_y[x+xi];
// The "joint" part checking for depth similarity
cost[0] += ::min(cmax_disc, Abs(disp_reg - dp[0])) * weight;
cost[1] += ::min(cmax_disc, Abs(disp_reg - dp[1])) * weight;
cost[2] += ::min(cmax_disc, Abs(disp_reg - dp[2])) * weight;
cost[3] += ::min(cmax_disc, Abs(disp_reg - dp[3])) * weight;
cost[4] += ::min(cmax_disc, Abs(disp_reg - dp[4])) * weight;
}
}
float minimum = cost[0];
int id = 0;
if (cost[1] < minimum)
{
minimum = cost[1];
id = 1;
}
if (cost[2] < minimum)
{
minimum = cost[2];
id = 2;
}
if (cost[3] < minimum)
{
minimum = cost[3];
id = 3;
}
if (cost[4] < minimum)
{
minimum = cost[4];
id = 4;
}
*(dispout + y * disp_step + x) = dp[id];
}
}
template <typename T, typename C>
void disp_bilateral_filter(cv::cuda::PtrStepSz<T> disp, cv::cuda::PtrStepSz<T> dispout, cv::cuda::PtrStepSz<C> img, int iters, const float *table_color, size_t table_step, int radius, T edge_disc, T max_disc, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = (disp.cols + 64 - 1) / 64; // 64*64 = 4096, max pixels in block
grid.y = (disp.rows + 64 - 1) / 64;
T *in_ptr = disp.data;
T *out_ptr = dispout.data;
            // Iters must be odd.
            if ((iters & 0x1) == 0) iters += 1;
switch (radius) {
case 1 :
for (int i = 0; i < iters; ++i) {
disp_bilateral_filter<C,1><<<grid, threads, 0, stream>>>(0, in_ptr, out_ptr, disp.step/sizeof(T), (C*)img.data, img.step/sizeof(C), disp.rows, disp.cols, table_color, edge_disc, max_disc);
cudaSafeCall( cudaGetLastError() );
std::swap(in_ptr, out_ptr);
} break;
case 2 :
for (int i = 0; i < iters; ++i) {
disp_bilateral_filter<C,2><<<grid, threads, 0, stream>>>(0, in_ptr, out_ptr, disp.step/sizeof(T), (C*)img.data, img.step/sizeof(C), disp.rows, disp.cols, table_color, edge_disc, max_disc);
cudaSafeCall( cudaGetLastError() );
std::swap(in_ptr, out_ptr);
} break;
case 3 :
for (int i = 0; i < iters; ++i) {
disp_bilateral_filter<C,3><<<grid, threads, 0, stream>>>(0, in_ptr, out_ptr, disp.step/sizeof(T), (C*)img.data, img.step/sizeof(C), disp.rows, disp.cols, table_color, edge_disc, max_disc);
cudaSafeCall( cudaGetLastError() );
std::swap(in_ptr, out_ptr);
} break;
case 4 :
for (int i = 0; i < iters; ++i) {
disp_bilateral_filter<C,4><<<grid, threads, 0, stream>>>(0, in_ptr, out_ptr, disp.step/sizeof(T), (C*)img.data, img.step/sizeof(C), disp.rows, disp.cols, table_color, edge_disc, max_disc);
cudaSafeCall( cudaGetLastError() );
std::swap(in_ptr, out_ptr);
} break;
case 5 :
for (int i = 0; i < iters; ++i) {
disp_bilateral_filter<C,5><<<grid, threads, 0, stream>>>(0, in_ptr, out_ptr, disp.step/sizeof(T), (C*)img.data, img.step/sizeof(C), disp.rows, disp.cols, table_color, edge_disc, max_disc);
cudaSafeCall( cudaGetLastError() );
std::swap(in_ptr, out_ptr);
} break;
case 6 :
for (int i = 0; i < iters; ++i) {
disp_bilateral_filter<C,6><<<grid, threads, 0, stream>>>(0, in_ptr, out_ptr, disp.step/sizeof(T), (C*)img.data, img.step/sizeof(C), disp.rows, disp.cols, table_color, edge_disc, max_disc);
cudaSafeCall( cudaGetLastError() );
std::swap(in_ptr, out_ptr);
} break;
case 7 :
for (int i = 0; i < iters; ++i) {
disp_bilateral_filter<C,7><<<grid, threads, 0, stream>>>(0, in_ptr, out_ptr, disp.step/sizeof(T), (C*)img.data, img.step/sizeof(C), disp.rows, disp.cols, table_color, edge_disc, max_disc);
cudaSafeCall( cudaGetLastError() );
std::swap(in_ptr, out_ptr);
} break;
default:
CV_Error(cv::Error::BadTileSize, "Unsupported kernel radius");
}
//if (stream == 0)
// cudaSafeCall( cudaDeviceSynchronize() );
}
        // These are commented out since we don't use them and they slow down compilation
//template void disp_bilateral_filter<uchar,uchar>(cv::cuda::PtrStepSz<uchar> disp, cv::cuda::PtrStepSz<uchar> dispout, cv::cuda::PtrStepSz<uchar> img, int iters, const float *table_color, size_t table_step, int radius, uchar, uchar, cudaStream_t stream);
//template void disp_bilateral_filter<short,uchar>(cv::cuda::PtrStepSz<short> disp, cv::cuda::PtrStepSz<short> dispout, cv::cuda::PtrStepSz<uchar> img, int iters, const float *table_color, size_t table_step, int radius, short, short, cudaStream_t stream);
//template void disp_bilateral_filter<float,uchar>(cv::cuda::PtrStepSz<float> disp, cv::cuda::PtrStepSz<float> dispout, cv::cuda::PtrStepSz<uchar> img, int iters, const float *table_color, size_t table_step, int radius, float, float, cudaStream_t stream);
//template void disp_bilateral_filter<uchar,uchar3>(cv::cuda::PtrStepSz<uchar> disp, cv::cuda::PtrStepSz<uchar> dispout, cv::cuda::PtrStepSz<uchar3> img, int iters, const float *table_color, size_t table_step, int radius, uchar, uchar, cudaStream_t stream);
//template void disp_bilateral_filter<short,uchar3>(cv::cuda::PtrStepSz<short> disp, cv::cuda::PtrStepSz<short> dispout, cv::cuda::PtrStepSz<uchar3> img, int iters, const float *table_color, size_t table_step, int radius, short, short, cudaStream_t stream);
//template void disp_bilateral_filter<float,uchar3>(cv::cuda::PtrStepSz<float> disp, cv::cuda::PtrStepSz<float> dispout, cv::cuda::PtrStepSz<uchar3> img, int iters, const float *table_color, size_t table_step, int radius, float, float, cudaStream_t stream);
template void disp_bilateral_filter<uchar,uchar4>(cv::cuda::PtrStepSz<uchar> disp, cv::cuda::PtrStepSz<uchar> dispout, cv::cuda::PtrStepSz<uchar4> img, int iters, const float *table_color, size_t table_step, int radius, uchar, uchar, cudaStream_t stream);
template void disp_bilateral_filter<short,uchar4>(cv::cuda::PtrStepSz<short> disp, cv::cuda::PtrStepSz<short> dispout, cv::cuda::PtrStepSz<uchar4> img, int iters, const float *table_color, size_t table_step, int radius, short, short, cudaStream_t stream);
template void disp_bilateral_filter<float,uchar4>(cv::cuda::PtrStepSz<float> disp, cv::cuda::PtrStepSz<float> dispout, cv::cuda::PtrStepSz<uchar4> img, int iters, const float *table_color, size_t table_step, int radius, float, float, cudaStream_t stream);
        } // namespace disp_bilateral_filter
}}} // namespace ftl { namespace cuda { namespace device
#endif /* CUDA_DISABLER */
|
82e45310e6c2de6745017683d35bc9505e905b93.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zbcsrlupivloc.cu normal z -> d, Tue Sep 2 12:38:32 2014
*/
#include <hip/hip_runtime_api.h>
#include <rocblas.h> // include before magma.h
#include <fstream>
#include <stdlib.h>
#include <string>
#include <sstream>
#include <iostream>
#include <ostream>
#include <assert.h>
#include <stdio.h>
#include "magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
#define PRECISION_d
#define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)]
#define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)]
#define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b)
#define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b)
//============================================================
#define ldb m
#define lda m
#define ldc m
// every multiprocessor handles one BCSR-block
__global__ void
dbcsrlupivloc_kernel(
int size_b,
int kblocks,
double **A,
magma_int_t *ipiv)
{
if( blockIdx.x < kblocks ) {
if(threadIdx.x < size_b ){
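            // Each thread handles one column of this size_b x size_b block (assuming
            // MAGMA's column-major layout with leading dimension size_b) and applies
            // the 1-based LAPACK-style row interchanges in ipiv to that column.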
for( int i=0; i<size_b; i++){
int dst = ipiv[i]-1;
if( dst != i ){
double *A1 = A[blockIdx.x]+threadIdx.x*size_b+i;
double *A2 = A[blockIdx.x]+threadIdx.x*size_b+dst;
double tmp = *A2;
*A2 = *A1;
*A1 = tmp;
}
}
}
}
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine updates all blocks in
the trailing matrix.
Arguments
---------
@param
size_b magma_int_t
blocksize in BCSR
@param
kblocks magma_int_t
number of blocks
@param
dA double**
matrix in BCSR
@param
ipiv magma_int_t*
array containing pivots
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbcsrlupivloc( magma_int_t size_b,
magma_int_t kblocks,
double **dA,
magma_int_t *ipiv ){
#if defined(PRECISION_d)
dim3 threads( 64, 1 );
dim3 grid(kblocks, 1, 1);
hipLaunchKernelGGL(( dbcsrlupivloc_kernel), dim3(grid), dim3(threads), 0, magma_stream ,
size_b, kblocks, dA, ipiv );
#endif
return MAGMA_SUCCESS;
}
| 82e45310e6c2de6745017683d35bc9505e905b93.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zbcsrlupivloc.cu normal z -> d, Tue Sep 2 12:38:32 2014
*/
#include <cuda_runtime_api.h>
#include <cublas_v2.h> // include before magma.h
#include <fstream>
#include <stdlib.h>
#include <string>
#include <sstream>
#include <iostream>
#include <ostream>
#include <assert.h>
#include <stdio.h>
#include "magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
#define PRECISION_d
#define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)]
#define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)]
#define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b)
#define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b)
//============================================================
#define ldb m
#define lda m
#define ldc m
// every multiprocessor handles one BCSR-block
__global__ void
dbcsrlupivloc_kernel(
int size_b,
int kblocks,
double **A,
magma_int_t *ipiv)
{
if( blockIdx.x < kblocks ) {
if(threadIdx.x < size_b ){
for( int i=0; i<size_b; i++){
int dst = ipiv[i]-1;
if( dst != i ){
double *A1 = A[blockIdx.x]+threadIdx.x*size_b+i;
double *A2 = A[blockIdx.x]+threadIdx.x*size_b+dst;
double tmp = *A2;
*A2 = *A1;
*A1 = tmp;
}
}
}
}
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine updates all blocks in
the trailing matrix.
Arguments
---------
@param
size_b magma_int_t
blocksize in BCSR
@param
kblocks magma_int_t
number of blocks
@param
dA double**
matrix in BCSR
@param
ipiv magma_int_t*
array containing pivots
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbcsrlupivloc( magma_int_t size_b,
magma_int_t kblocks,
double **dA,
magma_int_t *ipiv ){
#if defined(PRECISION_d)
dim3 threads( 64, 1 );
dim3 grid(kblocks, 1, 1);
dbcsrlupivloc_kernel<<< grid, threads, 0, magma_stream >>>(
size_b, kblocks, dA, ipiv );
#endif
return MAGMA_SUCCESS;
}
|
7734a3f0a52df332db8130043683e3571684625a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: new_xyz(b, m, 3) xyz(b, n, 3)
// output: idx(b, m, nsample)
__global__ void query_ball_point_kernel_dense(int b, int n, int m, float radius, int nsample,
const float* __restrict__ new_xyz,
const float* __restrict__ xyz,
int64_t* __restrict__ idx_out,
float* __restrict__ dist_out)
{
int batch_index = blockIdx.x;
xyz += batch_index * n * 3;
new_xyz += batch_index * m * 3;
idx_out += m * nsample * batch_index;
dist_out += m * nsample * batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
float radius2 = radius * radius;
for (int j = index; j < m; j += stride)
{
float new_x = new_xyz[j * 3 + 0];
float new_y = new_xyz[j * 3 + 1];
float new_z = new_xyz[j * 3 + 2];
for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k)
{
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d2 =
(new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z);
if (d2 < radius2)
{
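                // The first in-radius neighbour pre-fills every output slot, so any
                // slots left over when fewer than nsample neighbours exist still hold
                // a valid index instead of uninitialised data.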
if (cnt == 0)
{
for (int l = 0; l < nsample; ++l)
{
idx_out[j * nsample + l] = k;
}
}
idx_out[j * nsample + cnt] = k;
dist_out[j * nsample + cnt] = d2;
++cnt;
}
}
}
}
__global__ void query_ball_point_kernel_partial_dense(int size_x, int size_y, float radius,
int nsample, const float* __restrict__ x,
const float* __restrict__ y,
const int64_t* __restrict__ batch_x,
const int64_t* __restrict__ batch_y,
int64_t* __restrict__ idx_out,
float* __restrict__ dist_out)
{
// taken from
// https://github.com/rusty1s/pytorch_cluster/blob/master/cuda/radius_kernel.cu
const ptrdiff_t batch_idx = blockIdx.x;
const ptrdiff_t start_idx_x = batch_x[batch_idx];
const ptrdiff_t end_idx_x = batch_x[batch_idx + 1];
const ptrdiff_t start_idx_y = batch_y[batch_idx];
const ptrdiff_t end_idx_y = batch_y[batch_idx + 1];
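    // batch_x / batch_y hold CSR-style offsets: entries [batch_idx, batch_idx + 1)
    // delimit this batch's points inside the flattened x and y arrays.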
float radius2 = radius * radius;
for (ptrdiff_t n_y = start_idx_y + threadIdx.x; n_y < end_idx_y; n_y += blockDim.x)
{
int64_t count = 0;
for (ptrdiff_t n_x = start_idx_x; n_x < end_idx_x; n_x++)
{
float dist = 0;
for (ptrdiff_t d = 0; d < 3; d++)
{
dist += (x[n_x * 3 + d] - y[n_y * 3 + d]) * (x[n_x * 3 + d] - y[n_y * 3 + d]);
}
if (dist <= radius2)
{
idx_out[n_y * nsample + count] = n_x;
dist_out[n_y * nsample + count] = dist;
count++;
}
if (count >= nsample)
{
break;
}
}
}
}
void query_ball_point_kernel_dense_wrapper(int b, int n, int m, float radius, int nsample,
const float* new_xyz, const float* xyz, int64_t* idx,
float* dist_out)
{
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( query_ball_point_kernel_dense), dim3(b), dim3(opt_n_threads(m)), 0, stream, b, n, m, radius, nsample,
new_xyz, xyz, idx, dist_out);
CUDA_CHECK_ERRORS();
}
void query_ball_point_kernel_partial_wrapper(int64_t batch_size, int size_x, int size_y,
float radius, int nsample, const float* x,
const float* y, const int64_t* batch_x,
const int64_t* batch_y, int64_t* idx_out,
float* dist_out)
{
hipLaunchKernelGGL(( query_ball_point_kernel_partial_dense), dim3(batch_size), dim3(TOTAL_THREADS_SPARSE), 0, 0,
size_x, size_y, radius, nsample, x, y, batch_x, batch_y, idx_out, dist_out);
CUDA_CHECK_ERRORS();
}
| 7734a3f0a52df332db8130043683e3571684625a.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: new_xyz(b, m, 3) xyz(b, n, 3)
// output: idx(b, m, nsample)
__global__ void query_ball_point_kernel_dense(int b, int n, int m, float radius, int nsample,
const float* __restrict__ new_xyz,
const float* __restrict__ xyz,
int64_t* __restrict__ idx_out,
float* __restrict__ dist_out)
{
int batch_index = blockIdx.x;
xyz += batch_index * n * 3;
new_xyz += batch_index * m * 3;
idx_out += m * nsample * batch_index;
dist_out += m * nsample * batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
float radius2 = radius * radius;
for (int j = index; j < m; j += stride)
{
float new_x = new_xyz[j * 3 + 0];
float new_y = new_xyz[j * 3 + 1];
float new_z = new_xyz[j * 3 + 2];
for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k)
{
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d2 =
(new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z);
if (d2 < radius2)
{
if (cnt == 0)
{
for (int l = 0; l < nsample; ++l)
{
idx_out[j * nsample + l] = k;
}
}
idx_out[j * nsample + cnt] = k;
dist_out[j * nsample + cnt] = d2;
++cnt;
}
}
}
}
__global__ void query_ball_point_kernel_partial_dense(int size_x, int size_y, float radius,
int nsample, const float* __restrict__ x,
const float* __restrict__ y,
const int64_t* __restrict__ batch_x,
const int64_t* __restrict__ batch_y,
int64_t* __restrict__ idx_out,
float* __restrict__ dist_out)
{
// taken from
// https://github.com/rusty1s/pytorch_cluster/blob/master/cuda/radius_kernel.cu
const ptrdiff_t batch_idx = blockIdx.x;
const ptrdiff_t start_idx_x = batch_x[batch_idx];
const ptrdiff_t end_idx_x = batch_x[batch_idx + 1];
const ptrdiff_t start_idx_y = batch_y[batch_idx];
const ptrdiff_t end_idx_y = batch_y[batch_idx + 1];
float radius2 = radius * radius;
for (ptrdiff_t n_y = start_idx_y + threadIdx.x; n_y < end_idx_y; n_y += blockDim.x)
{
int64_t count = 0;
for (ptrdiff_t n_x = start_idx_x; n_x < end_idx_x; n_x++)
{
float dist = 0;
for (ptrdiff_t d = 0; d < 3; d++)
{
dist += (x[n_x * 3 + d] - y[n_y * 3 + d]) * (x[n_x * 3 + d] - y[n_y * 3 + d]);
}
if (dist <= radius2)
{
idx_out[n_y * nsample + count] = n_x;
dist_out[n_y * nsample + count] = dist;
count++;
}
if (count >= nsample)
{
break;
}
}
}
}
void query_ball_point_kernel_dense_wrapper(int b, int n, int m, float radius, int nsample,
const float* new_xyz, const float* xyz, int64_t* idx,
float* dist_out)
{
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
query_ball_point_kernel_dense<<<b, opt_n_threads(m), 0, stream>>>(b, n, m, radius, nsample,
new_xyz, xyz, idx, dist_out);
CUDA_CHECK_ERRORS();
}
void query_ball_point_kernel_partial_wrapper(int64_t batch_size, int size_x, int size_y,
float radius, int nsample, const float* x,
const float* y, const int64_t* batch_x,
const int64_t* batch_y, int64_t* idx_out,
float* dist_out)
{
query_ball_point_kernel_partial_dense<<<batch_size, TOTAL_THREADS_SPARSE>>>(
size_x, size_y, radius, nsample, x, y, batch_x, batch_y, idx_out, dist_out);
CUDA_CHECK_ERRORS();
}
|
8913917bdee6896b68f7e231ee4e4a1a930daf53.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/nanmedian_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/nanmedian_utils.h"
namespace phi {
using phi::PADDLE_CUDA_NUM_THREADS;
inline int GET_BLOCKS(const int N) {
return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS;
}
template <typename T>
__global__ void KernelNanmedianGrad(const T* x_data,
const int64_t* medians_ptr,
const T* out_grad_ptr,
T* dx_data,
int64_t stride,
int64_t pre_dim) {
CUDA_KERNEL_LOOP(index, pre_dim) {
int64_t offset = index * stride;
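    // median_index records two positions per reduced element; when the two positions
    // coincide the whole upstream gradient is routed to that single element, otherwise
    // it is split evenly between them in the branch below.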
printf("index: %d\n", index);
printf("medians_ptr[2 * index]: %d\n", medians_ptr[2 * index]);
printf("medians_ptr[2 * index+1]: %d\n", medians_ptr[2 * index + 1]);
if (medians_ptr[2 * index] >= 0) {
if (medians_ptr[2 * index] == medians_ptr[2 * index + 1]) {
dx_data[offset + medians_ptr[2 * index]] = out_grad_ptr[index];
} else {
dx_data[offset + medians_ptr[2 * index]] =
out_grad_ptr[index] / static_cast<T>(2.0);
dx_data[offset + medians_ptr[2 * index + 1]] =
out_grad_ptr[index] / static_cast<T>(2.0);
}
}
}
}
template <typename T, typename Context>
void CalcMedianGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& median_index,
const DenseTensor& out_grad,
DenseTensor* x_grad) {
T* dx_data = dev_ctx.template Alloc<T>(x_grad);
if (!dx_data) return;
phi::funcs::SetConstant<Context, T> set_zero;
set_zero(dev_ctx, x_grad, static_cast<T>(0));
VLOG(0) << "x_grad->dims(): " << x_grad->dims();
auto stream = dev_ctx.stream();
const T* x_data = x.data<T>();
const int64_t* m_data = median_index.data<int64_t>();
const T* out_grad_ptr = out_grad.data<T>();
int64_t numel = x.numel();
auto x_dim = x.dims();
int64_t x_rank = x_dim.size();
int64_t stride = x_dim[x_rank - 1];
int64_t pre_dim = numel / stride;
hipLaunchKernelGGL(( KernelNanmedianGrad<T>)
, dim3(GET_BLOCKS(pre_dim)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
x_data, m_data, out_grad_ptr, dx_data, stride, pre_dim);
}
template <typename T, typename Context>
void NanmedianGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& median_index,
const DenseTensor& out_grad,
const IntArray& axes,
bool keepdim UNUSED,
DenseTensor* x_grad) {
DenseTensor tmp_x;
auto rank = x.dims().size();
if ((axes.size() == 0) || rank <= 1) {
tmp_x = x;
tmp_x.Resize({x.numel()});
CalcMedianGradKernel<T, Context>(
dev_ctx, tmp_x, median_index, out_grad, x_grad);
} else {
funcs::PreprocessMedianKernel<T, Context>(dev_ctx, x, axes, &tmp_x);
DenseTensor tmp_x_grad;
tmp_x_grad.Resize(x_grad->dims());
CalcMedianGradKernel<T, Context>(
dev_ctx, tmp_x, median_index, out_grad, &tmp_x_grad);
dev_ctx.template Alloc<T>(x_grad);
funcs::PostprocessMedianGradKernel<T, Context>(
dev_ctx, &tmp_x_grad, axes, x_grad);
}
}
} // namespace phi
PD_REGISTER_KERNEL(nanmedian_grad,
GPU,
ALL_LAYOUT,
phi::NanmedianGradKernel,
float,
double,
int,
int64_t,
phi::dtype::float16,
phi::dtype::bfloat16) {}
| 8913917bdee6896b68f7e231ee4e4a1a930daf53.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/nanmedian_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/nanmedian_utils.h"
namespace phi {
using phi::PADDLE_CUDA_NUM_THREADS;
inline int GET_BLOCKS(const int N) {
return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS;
}
template <typename T>
__global__ void KernelNanmedianGrad(const T* x_data,
const int64_t* medians_ptr,
const T* out_grad_ptr,
T* dx_data,
int64_t stride,
int64_t pre_dim) {
CUDA_KERNEL_LOOP(index, pre_dim) {
int64_t offset = index * stride;
printf("index: %d\n", index);
printf("medians_ptr[2 * index]: %d\n", medians_ptr[2 * index]);
printf("medians_ptr[2 * index+1]: %d\n", medians_ptr[2 * index + 1]);
if (medians_ptr[2 * index] >= 0) {
if (medians_ptr[2 * index] == medians_ptr[2 * index + 1]) {
dx_data[offset + medians_ptr[2 * index]] = out_grad_ptr[index];
} else {
dx_data[offset + medians_ptr[2 * index]] =
out_grad_ptr[index] / static_cast<T>(2.0);
dx_data[offset + medians_ptr[2 * index + 1]] =
out_grad_ptr[index] / static_cast<T>(2.0);
}
}
}
}
template <typename T, typename Context>
void CalcMedianGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& median_index,
const DenseTensor& out_grad,
DenseTensor* x_grad) {
T* dx_data = dev_ctx.template Alloc<T>(x_grad);
if (!dx_data) return;
phi::funcs::SetConstant<Context, T> set_zero;
set_zero(dev_ctx, x_grad, static_cast<T>(0));
VLOG(0) << "x_grad->dims(): " << x_grad->dims();
auto stream = dev_ctx.stream();
const T* x_data = x.data<T>();
const int64_t* m_data = median_index.data<int64_t>();
const T* out_grad_ptr = out_grad.data<T>();
int64_t numel = x.numel();
auto x_dim = x.dims();
int64_t x_rank = x_dim.size();
int64_t stride = x_dim[x_rank - 1];
int64_t pre_dim = numel / stride;
KernelNanmedianGrad<T>
<<<GET_BLOCKS(pre_dim), PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
x_data, m_data, out_grad_ptr, dx_data, stride, pre_dim);
}
template <typename T, typename Context>
void NanmedianGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& median_index,
const DenseTensor& out_grad,
const IntArray& axes,
bool keepdim UNUSED,
DenseTensor* x_grad) {
DenseTensor tmp_x;
auto rank = x.dims().size();
if ((axes.size() == 0) || rank <= 1) {
tmp_x = x;
tmp_x.Resize({x.numel()});
CalcMedianGradKernel<T, Context>(
dev_ctx, tmp_x, median_index, out_grad, x_grad);
} else {
funcs::PreprocessMedianKernel<T, Context>(dev_ctx, x, axes, &tmp_x);
DenseTensor tmp_x_grad;
tmp_x_grad.Resize(x_grad->dims());
CalcMedianGradKernel<T, Context>(
dev_ctx, tmp_x, median_index, out_grad, &tmp_x_grad);
dev_ctx.template Alloc<T>(x_grad);
funcs::PostprocessMedianGradKernel<T, Context>(
dev_ctx, &tmp_x_grad, axes, x_grad);
}
}
} // namespace phi
PD_REGISTER_KERNEL(nanmedian_grad,
GPU,
ALL_LAYOUT,
phi::NanmedianGradKernel,
float,
double,
int,
int64_t,
phi::dtype::float16,
phi::dtype::bfloat16) {}
|
5497efad3b6639f909a0c35e477bbede02eb1984.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "correlation_cuda_kernel.h"
#define real float
#define CUDA_NUM_THREADS 1024
#define THREADS_PER_BLOCK 32
__global__ void channels_first(float* input, float* rinput, int channels, int height, int width, int pad_size)
{
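    // Repacks one NCHW tensor into rinput with a padded NHWC layout, so the
    // correlation kernels can read all channels of a pixel contiguously; the pad
    // border is assumed to be zero-filled by the caller before this kernel runs.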
// n (batch size), c (num of channels), y (height), x (width)
int n = blockIdx.x;
int y = blockIdx.y;
int x = blockIdx.z;
int ch_off = threadIdx.x;
float value;
int dimcyx = channels * height * width;
int dimyx = height * width;
int p_dimx = (width + 2 * pad_size);
int p_dimy = (height + 2 * pad_size);
int p_dimyxc = channels * p_dimy * p_dimx;
int p_dimxc = p_dimx * channels;
for (int c = ch_off; c < channels; c += THREADS_PER_BLOCK) {
value = input[n * dimcyx + c * dimyx + y * width + x];
rinput[n * p_dimyxc + (y + pad_size) * p_dimxc + (x + pad_size) * channels + c] = value;
}
}
__global__ void Correlation_forward( float *output, int nOutputChannels, int outputHeight, int outputWidth,
float *rInput1, int nInputChannels, int inputHeight, int inputWidth,
float *rInput2,
int pad_size,
int kernel_size,
int max_displacement,
int stride1,
int stride2)
{
// n (batch size), c (num of channels), y (height), x (width)
int pInputWidth = inputWidth + 2 * pad_size;
int pInputHeight = inputHeight + 2 * pad_size;
int kernel_rad = (kernel_size - 1) / 2;
int displacement_rad = max_displacement / stride2;
int displacement_size = 2 * displacement_rad + 1;
int n = blockIdx.x;
int y1 = blockIdx.y * stride1 + max_displacement + kernel_rad;
int x1 = blockIdx.z * stride1 + max_displacement + kernel_rad;
int c = threadIdx.x;
int pdimyxc = pInputHeight * pInputWidth * nInputChannels;
int pdimxc = pInputWidth * nInputChannels;
int pdimc = nInputChannels;
int tdimcyx = nOutputChannels * outputHeight * outputWidth;
int tdimyx = outputHeight * outputWidth;
int tdimx = outputWidth;
float nelems = kernel_size * kernel_size * pdimc;
__shared__ float prod_sum[THREADS_PER_BLOCK];
// no significant speed-up in using chip memory for input1 sub-data,
  // not enough chip memory size to accommodate memory per block for input2 sub-data
// instead i've used device memory for both
// element-wise product along channel axis
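  // Each (tj, ti) displacement maps to one output channel: the threads of a block
  // split the channel dimension, accumulate the patch dot-product into prod_sum,
  // and thread 0 reduces the partial sums and normalises by
  // kernel_size * kernel_size * nInputChannels.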
for (int tj = -displacement_rad; tj <= displacement_rad; ++tj ) {
for (int ti = -displacement_rad; ti <= displacement_rad; ++ti ) {
prod_sum[c] = 0;
int x2 = x1 + ti*stride2;
int y2 = y1 + tj*stride2;
for (int j = -kernel_rad; j <= kernel_rad; ++j) {
for (int i = -kernel_rad; i <= kernel_rad; ++i) {
for (int ch = c; ch < pdimc; ch += THREADS_PER_BLOCK) {
int indx1 = n * pdimyxc + (y1+j) * pdimxc + (x1 + i) * pdimc + ch;
int indx2 = n * pdimyxc + (y2+j) * pdimxc + (x2 + i) * pdimc + ch;
prod_sum[c] += rInput1[indx1] * rInput2[indx2];
}
}
}
// accumulate
__syncthreads();
if (c == 0) {
float reduce_sum = 0;
for (int index = 0; index < THREADS_PER_BLOCK; ++index) {
reduce_sum += prod_sum[index];
}
int tc = (tj + displacement_rad) * displacement_size + (ti + displacement_rad);
const int tindx = n * tdimcyx + tc * tdimyx + blockIdx.y * tdimx + blockIdx.z;
output[tindx] = reduce_sum / nelems;
}
}
}
}
__global__ void Correlation_backward_input1(int item, float *gradInput1, int nInputChannels, int inputHeight, int inputWidth,
float *gradOutput, int nOutputChannels, int outputHeight, int outputWidth,
float *rInput2,
int pad_size,
int kernel_size,
int max_displacement,
int stride1,
int stride2)
{
// n (batch size), c (num of channels), y (height), x (width)
int n = item;
int y = blockIdx.x * stride1 + pad_size;
int x = blockIdx.y * stride1 + pad_size;
int c = blockIdx.z;
int tch_off = threadIdx.x;
int kernel_rad = (kernel_size - 1) / 2;
int displacement_rad = max_displacement / stride2;
int displacement_size = 2 * displacement_rad + 1;
int xmin = (x - kernel_rad - max_displacement) / stride1;
int ymin = (y - kernel_rad - max_displacement) / stride1;
int xmax = (x + kernel_rad - max_displacement) / stride1;
int ymax = (y + kernel_rad - max_displacement) / stride1;
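  // [xmin, xmax] x [ymin, ymax] bounds the output positions whose kernel window
  // around their centre can have touched this padded input pixel; any location
  // outside that range contributed no gradient here.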
if (xmax < 0 || ymax < 0 || xmin >= outputWidth || ymin >= outputHeight) {
// assumes gradInput1 is pre-allocated and zero filled
return;
}
if (xmin > xmax || ymin > ymax) {
// assumes gradInput1 is pre-allocated and zero filled
return;
}
xmin = max(0,xmin);
xmax = min(outputWidth-1,xmax);
ymin = max(0,ymin);
ymax = min(outputHeight-1,ymax);
int pInputWidth = inputWidth + 2 * pad_size;
int pInputHeight = inputHeight + 2 * pad_size;
int pdimyxc = pInputHeight * pInputWidth * nInputChannels;
int pdimxc = pInputWidth * nInputChannels;
int pdimc = nInputChannels;
int tdimcyx = nOutputChannels * outputHeight * outputWidth;
int tdimyx = outputHeight * outputWidth;
int tdimx = outputWidth;
int odimcyx = nInputChannels * inputHeight* inputWidth;
int odimyx = inputHeight * inputWidth;
int odimx = inputWidth;
float nelems = kernel_size * kernel_size * nInputChannels;
__shared__ float prod_sum[THREADS_PER_BLOCK];
prod_sum[tch_off] = 0;
for (int tc = tch_off; tc < nOutputChannels; tc += THREADS_PER_BLOCK) {
int i2 = (tc % displacement_size - displacement_rad) * stride2;
int j2 = (tc / displacement_size - displacement_rad) * stride2;
int indx2 = n * pdimyxc + (y + j2)* pdimxc + (x + i2) * pdimc + c;
float val2 = rInput2[indx2];
for (int j = ymin; j <= ymax; ++j) {
for (int i = xmin; i <= xmax; ++i) {
int tindx = n * tdimcyx + tc * tdimyx + j * tdimx + i;
prod_sum[tch_off] += gradOutput[tindx] * val2;
}
}
}
__syncthreads();
if(tch_off == 0) {
float reduce_sum = 0;
for(int idx = 0; idx < THREADS_PER_BLOCK; idx++) {
reduce_sum += prod_sum[idx];
}
const int indx1 = n * odimcyx + c * odimyx + (y - pad_size) * odimx + (x - pad_size);
gradInput1[indx1] = reduce_sum / nelems;
}
}
__global__ void Correlation_backward_input2(int item, float *gradInput2, int nInputChannels, int inputHeight, int inputWidth,
float *gradOutput, int nOutputChannels, int outputHeight, int outputWidth,
float *rInput1,
int pad_size,
int kernel_size,
int max_displacement,
int stride1,
int stride2)
{
// n (batch size), c (num of channels), y (height), x (width)
int n = item;
int y = blockIdx.x * stride1 + pad_size;
int x = blockIdx.y * stride1 + pad_size;
int c = blockIdx.z;
int tch_off = threadIdx.x;
int kernel_rad = (kernel_size - 1) / 2;
int displacement_rad = max_displacement / stride2;
int displacement_size = 2 * displacement_rad + 1;
int pInputWidth = inputWidth + 2 * pad_size;
int pInputHeight = inputHeight + 2 * pad_size;
int pdimyxc = pInputHeight * pInputWidth * nInputChannels;
int pdimxc = pInputWidth * nInputChannels;
int pdimc = nInputChannels;
int tdimcyx = nOutputChannels * outputHeight * outputWidth;
int tdimyx = outputHeight * outputWidth;
int tdimx = outputWidth;
int odimcyx = nInputChannels * inputHeight* inputWidth;
int odimyx = inputHeight * inputWidth;
int odimx = inputWidth;
float nelems = kernel_size * kernel_size * nInputChannels;
__shared__ float prod_sum[THREADS_PER_BLOCK];
prod_sum[tch_off] = 0;
for (int tc = tch_off; tc < nOutputChannels; tc += THREADS_PER_BLOCK) {
int i2 = (tc % displacement_size - displacement_rad) * stride2;
int j2 = (tc / displacement_size - displacement_rad) * stride2;
int xmin = (x - kernel_rad - max_displacement - i2) / stride1;
int ymin = (y - kernel_rad - max_displacement - j2) / stride1;
int xmax = (x + kernel_rad - max_displacement - i2) / stride1;
int ymax = (y + kernel_rad - max_displacement - j2) / stride1;
if (xmax < 0 || ymax < 0 || xmin >= outputWidth || ymin >= outputHeight) {
// assumes gradInput2 is pre-allocated and zero filled
continue;
}
if (xmin > xmax || ymin > ymax) {
// assumes gradInput2 is pre-allocated and zero filled
continue;
}
xmin = max(0,xmin);
xmax = min(outputWidth-1,xmax);
ymin = max(0,ymin);
ymax = min(outputHeight-1,ymax);
int indx1 = n * pdimyxc + (y - j2)* pdimxc + (x - i2) * pdimc + c;
float val1 = rInput1[indx1];
for (int j = ymin; j <= ymax; ++j) {
for (int i = xmin; i <= xmax; ++i) {
int tindx = n * tdimcyx + tc * tdimyx + j * tdimx + i;
prod_sum[tch_off] += gradOutput[tindx] * val1;
}
}
}
__syncthreads();
if(tch_off == 0) {
float reduce_sum = 0;
for(int idx = 0; idx < THREADS_PER_BLOCK; idx++) {
reduce_sum += prod_sum[idx];
}
const int indx2 = n * odimcyx + c * odimyx + (y - pad_size) * odimx + (x - pad_size);
gradInput2[indx2] = reduce_sum / nelems;
}
}
#pragma once
#ifdef __cplusplus
extern "C"
{
#endif
int Correlation_forward_cuda_kernel(/*THCudaTensor_data(state, output)*/ float *output,
/*THCudaTensor_size(state, output, 0)*/ int ob,
/*THCudaTensor_size(state, output, 1)*/ int oc,
/*THCudaTensor_size(state, output, 2)*/ int oh,
/*THCudaTensor_size(state, output, 3)*/ int ow,
/*THCudaTensor_stride(state, output, 0)*/ int osb,
/*THCudaTensor_stride(state, output, 1)*/ int osc,
/*THCudaTensor_stride(state, output, 2)*/ int osh,
/*THCudaTensor_stride(state, output, 3)*/ int osw,
/*THCudaTensor_data(state, input1)*/ float *input1,
/*THCudaTensor_size(state, input1, 1)*/ int ic,
/*THCudaTensor_size(state, input1, 2)*/ int ih,
/*THCudaTensor_size(state, input1, 3)*/ int iw,
/*THCudaTensor_stride(state, input1, 0)*/ int isb,
/*THCudaTensor_stride(state, input1, 1)*/ int isc,
/*THCudaTensor_stride(state, input1, 2)*/ int ish,
/*THCudaTensor_stride(state, input1, 3)*/ int isw,
/*THCudaTensor_data(state, input2)*/ float *input2,
/*THCudaTensor_size(state, input2, 1)*/ int gc,
/*THCudaTensor_stride(state, input2, 0)*/ int gsb,
/*THCudaTensor_stride(state, input2, 1)*/ int gsc,
/*THCudaTensor_stride(state, input2, 2)*/ int gsh,
/*THCudaTensor_stride(state, input2, 3)*/ int gsw,
/*THCudaTensor_data(state, rInput1)*/ float *rInput1,
/*THCudaTensor_data(state, rInput2)*/ float *rInput2,
int pad_size,
int kernel_size,
int max_displacement,
int stride1,
int stride2,
int corr_type_multiply,
/*THCState_getCurrentStream(state)*/ hipStream_t stream)
{
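    // rInput1 / rInput2 are caller-provided scratch buffers that receive the padded
    // NHWC repacking of input1 / input2 (via channels_first) before the correlation
    // kernel itself is launched.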
int batchSize = ob;
int nInputChannels = ic;
int inputWidth = iw;
int inputHeight = ih;
int nOutputChannels = oc;
int outputWidth = ow;
int outputHeight = oh;
dim3 blocks_grid(batchSize, inputHeight, inputWidth);
dim3 threads_block(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( channels_first), dim3(blocks_grid),dim3(threads_block), 0, stream, input1,rInput1, nInputChannels, inputHeight, inputWidth,pad_size);
hipLaunchKernelGGL(( channels_first), dim3(blocks_grid),dim3(threads_block), 0, stream, input2,rInput2, nInputChannels, inputHeight, inputWidth, pad_size);
dim3 threadsPerBlock(THREADS_PER_BLOCK);
dim3 totalBlocksCorr(batchSize, outputHeight, outputWidth);
hipLaunchKernelGGL(( Correlation_forward) , dim3(totalBlocksCorr), dim3(threadsPerBlock), 0, stream ,
output, nOutputChannels, outputHeight, outputWidth,
rInput1, nInputChannels, inputHeight, inputWidth,
rInput2,
pad_size,
kernel_size,
max_displacement,
stride1,
stride2);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in Correlation_forward_cuda_kernel: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
}
int Correlation_backward_cuda_kernel(
/*THCudaTensor_data(state, gradOutput)*/ float *gradOutput,
/*THCudaTensor_size(state, gradOutput, 0)*/ int gob,
/*THCudaTensor_size(state, gradOutput, 1)*/ int goc,
/*THCudaTensor_size(state, gradOutput, 2)*/ int goh,
/*THCudaTensor_size(state, gradOutput, 3)*/ int gow,
/*THCudaTensor_stride(state, gradOutput, 0)*/ int gosb,
/*THCudaTensor_stride(state, gradOutput, 1)*/ int gosc,
/*THCudaTensor_stride(state, gradOutput, 2)*/ int gosh,
/*THCudaTensor_stride(state, gradOutput, 3)*/ int gosw,
/*THCudaTensor_data(state, input1)*/ float* input1,
/*THCudaTensor_size(state, input1, 1)*/ int ic,
/*THCudaTensor_size(state, input1, 2)*/ int ih,
/*THCudaTensor_size(state, input1, 3)*/ int iw,
/*THCudaTensor_stride(state, input1, 0)*/ int isb,
/*THCudaTensor_stride(state, input1, 1)*/ int isc,
/*THCudaTensor_stride(state, input1, 2)*/ int ish,
/*THCudaTensor_stride(state, input1, 3)*/ int isw,
/*THCudaTensor_data(state, input2)*/ float *input2,
/*THCudaTensor_stride(state, input2, 0)*/ int gsb,
/*THCudaTensor_stride(state, input2, 1)*/ int gsc,
/*THCudaTensor_stride(state, input2, 2)*/ int gsh,
/*THCudaTensor_stride(state, input2, 3)*/ int gsw,
/*THCudaTensor_data(state, gradInput1)*/ float *gradInput1,
/*THCudaTensor_stride(state, gradInput1, 0)*/ int gisb,
/*THCudaTensor_stride(state, gradInput1, 1)*/ int gisc,
/*THCudaTensor_stride(state, gradInput1, 2)*/ int gish,
/*THCudaTensor_stride(state, gradInput1, 3)*/ int gisw,
/*THCudaTensor_data(state, gradInput2)*/ float *gradInput2,
/*THCudaTensor_size(state, gradInput2, 1)*/ int ggc,
/*THCudaTensor_stride(state, gradInput2, 0)*/ int ggsb,
/*THCudaTensor_stride(state, gradInput2, 1)*/ int ggsc,
/*THCudaTensor_stride(state, gradInput2, 2)*/ int ggsh,
/*THCudaTensor_stride(state, gradInput2, 3)*/ int ggsw,
/*THCudaTensor_data(state, rInput1)*/ float *rInput1,
/*THCudaTensor_data(state, rInput2)*/ float *rInput2,
int pad_size,
int kernel_size,
int max_displacement,
int stride1,
int stride2,
int corr_type_multiply,
/*THCState_getCurrentStream(state)*/hipStream_t stream)
{
int batchSize = gob;
int num = batchSize;
int nInputChannels = ic;
int inputWidth = iw;
int inputHeight = ih;
int nOutputChannels = goc;
int outputWidth = gow;
int outputHeight = goh;
dim3 blocks_grid(batchSize, inputHeight, inputWidth);
dim3 threads_block(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( channels_first), dim3(blocks_grid),dim3(threads_block), 0, stream, input1, rInput1, nInputChannels,inputHeight, inputWidth, pad_size);
hipLaunchKernelGGL(( channels_first), dim3(blocks_grid),dim3(threads_block), 0, stream, input2, rInput2, nInputChannels, inputHeight, inputWidth, pad_size);
dim3 threadsPerBlock(THREADS_PER_BLOCK);
dim3 totalBlocksCorr(inputHeight, inputWidth, nInputChannels);
for (int n = 0; n < num; ++n) {
    hipLaunchKernelGGL(( Correlation_backward_input1), dim3(totalBlocksCorr), dim3(threadsPerBlock), 0, stream,
n, gradInput1, nInputChannels, inputHeight, inputWidth,
gradOutput, nOutputChannels, outputHeight, outputWidth,
rInput2,
pad_size,
kernel_size,
max_displacement,
stride1,
stride2);
}
for(int n = 0; n < batchSize; n++) {
hipLaunchKernelGGL(( Correlation_backward_input2), dim3(totalBlocksCorr), dim3(threadsPerBlock), 0, stream,
n, gradInput2, nInputChannels, inputHeight, inputWidth,
gradOutput, nOutputChannels, outputHeight, outputWidth,
rInput1,
pad_size,
kernel_size,
max_displacement,
stride1,
stride2);
}
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in Correlation_backward_cuda_kernel: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
}
#ifdef __cplusplus
}
#endif
| 5497efad3b6639f909a0c35e477bbede02eb1984.cu | #include <stdio.h>
#include "correlation_cuda_kernel.h"
#define real float
#define CUDA_NUM_THREADS 1024
#define THREADS_PER_BLOCK 32
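// channels_first: repacks an NCHW tensor into a spatially padded channels-last (NHWC)
// buffer so the correlation kernels can read contiguous channel vectors per pixel.
// One block per (batch, y, x); threads stride over channels. The padding border is
// not written here, so the caller is assumed to provide a zero-initialized buffer.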
__global__ void channels_first(float* input, float* rinput, int channels, int height, int width, int pad_size)
{
// n (batch size), c (num of channels), y (height), x (width)
int n = blockIdx.x;
int y = blockIdx.y;
int x = blockIdx.z;
int ch_off = threadIdx.x;
float value;
int dimcyx = channels * height * width;
int dimyx = height * width;
int p_dimx = (width + 2 * pad_size);
int p_dimy = (height + 2 * pad_size);
int p_dimyxc = channels * p_dimy * p_dimx;
int p_dimxc = p_dimx * channels;
for (int c = ch_off; c < channels; c += THREADS_PER_BLOCK) {
value = input[n * dimcyx + c * dimyx + y * width + x];
rinput[n * p_dimyxc + (y + pad_size) * p_dimxc + (x + pad_size) * channels + c] = value;
}
}
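// Correlation_forward: builds the correlation cost volume between the two padded
// channels-last feature maps. One block per (batch, output y, output x); for each
// displacement (ti, tj) the threads accumulate patch dot products over channels in
// shared memory, and thread 0 writes the result averaged over kernel_size^2 * channels.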
__global__ void Correlation_forward( float *output, int nOutputChannels, int outputHeight, int outputWidth,
float *rInput1, int nInputChannels, int inputHeight, int inputWidth,
float *rInput2,
int pad_size,
int kernel_size,
int max_displacement,
int stride1,
int stride2)
{
// n (batch size), c (num of channels), y (height), x (width)
int pInputWidth = inputWidth + 2 * pad_size;
int pInputHeight = inputHeight + 2 * pad_size;
int kernel_rad = (kernel_size - 1) / 2;
int displacement_rad = max_displacement / stride2;
int displacement_size = 2 * displacement_rad + 1;
int n = blockIdx.x;
int y1 = blockIdx.y * stride1 + max_displacement + kernel_rad;
int x1 = blockIdx.z * stride1 + max_displacement + kernel_rad;
int c = threadIdx.x;
int pdimyxc = pInputHeight * pInputWidth * nInputChannels;
int pdimxc = pInputWidth * nInputChannels;
int pdimc = nInputChannels;
int tdimcyx = nOutputChannels * outputHeight * outputWidth;
int tdimyx = outputHeight * outputWidth;
int tdimx = outputWidth;
float nelems = kernel_size * kernel_size * pdimc;
__shared__ float prod_sum[THREADS_PER_BLOCK];
// No significant speed-up was observed from staging the input1 sub-data in shared
// (on-chip) memory, and there is not enough shared memory per block to accommodate
// the input2 sub-data, so device (global) memory is used for both.
// element-wise product along channel axis
for (int tj = -displacement_rad; tj <= displacement_rad; ++tj ) {
for (int ti = -displacement_rad; ti <= displacement_rad; ++ti ) {
prod_sum[c] = 0;
int x2 = x1 + ti*stride2;
int y2 = y1 + tj*stride2;
for (int j = -kernel_rad; j <= kernel_rad; ++j) {
for (int i = -kernel_rad; i <= kernel_rad; ++i) {
for (int ch = c; ch < pdimc; ch += THREADS_PER_BLOCK) {
int indx1 = n * pdimyxc + (y1+j) * pdimxc + (x1 + i) * pdimc + ch;
int indx2 = n * pdimyxc + (y2+j) * pdimxc + (x2 + i) * pdimc + ch;
prod_sum[c] += rInput1[indx1] * rInput2[indx2];
}
}
}
// accumulate
__syncthreads();
if (c == 0) {
float reduce_sum = 0;
for (int index = 0; index < THREADS_PER_BLOCK; ++index) {
reduce_sum += prod_sum[index];
}
int tc = (tj + displacement_rad) * displacement_size + (ti + displacement_rad);
const int tindx = n * tdimcyx + tc * tdimyx + blockIdx.y * tdimx + blockIdx.z;
output[tindx] = reduce_sum / nelems;
}
}
}
}
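// Correlation_backward_input1: gradient w.r.t. the first input for one batch item.
// One block per (input y, input x, channel); threads sum gradOutput * rInput2 over
// the displacement channels whose correlation window covers this input position.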
__global__ void Correlation_backward_input1(int item, float *gradInput1, int nInputChannels, int inputHeight, int inputWidth,
float *gradOutput, int nOutputChannels, int outputHeight, int outputWidth,
float *rInput2,
int pad_size,
int kernel_size,
int max_displacement,
int stride1,
int stride2)
{
// n (batch size), c (num of channels), y (height), x (width)
int n = item;
int y = blockIdx.x * stride1 + pad_size;
int x = blockIdx.y * stride1 + pad_size;
int c = blockIdx.z;
int tch_off = threadIdx.x;
int kernel_rad = (kernel_size - 1) / 2;
int displacement_rad = max_displacement / stride2;
int displacement_size = 2 * displacement_rad + 1;
int xmin = (x - kernel_rad - max_displacement) / stride1;
int ymin = (y - kernel_rad - max_displacement) / stride1;
int xmax = (x + kernel_rad - max_displacement) / stride1;
int ymax = (y + kernel_rad - max_displacement) / stride1;
if (xmax < 0 || ymax < 0 || xmin >= outputWidth || ymin >= outputHeight) {
// assumes gradInput1 is pre-allocated and zero filled
return;
}
if (xmin > xmax || ymin > ymax) {
// assumes gradInput1 is pre-allocated and zero filled
return;
}
xmin = max(0,xmin);
xmax = min(outputWidth-1,xmax);
ymin = max(0,ymin);
ymax = min(outputHeight-1,ymax);
int pInputWidth = inputWidth + 2 * pad_size;
int pInputHeight = inputHeight + 2 * pad_size;
int pdimyxc = pInputHeight * pInputWidth * nInputChannels;
int pdimxc = pInputWidth * nInputChannels;
int pdimc = nInputChannels;
int tdimcyx = nOutputChannels * outputHeight * outputWidth;
int tdimyx = outputHeight * outputWidth;
int tdimx = outputWidth;
int odimcyx = nInputChannels * inputHeight* inputWidth;
int odimyx = inputHeight * inputWidth;
int odimx = inputWidth;
float nelems = kernel_size * kernel_size * nInputChannels;
__shared__ float prod_sum[THREADS_PER_BLOCK];
prod_sum[tch_off] = 0;
for (int tc = tch_off; tc < nOutputChannels; tc += THREADS_PER_BLOCK) {
int i2 = (tc % displacement_size - displacement_rad) * stride2;
int j2 = (tc / displacement_size - displacement_rad) * stride2;
int indx2 = n * pdimyxc + (y + j2)* pdimxc + (x + i2) * pdimc + c;
float val2 = rInput2[indx2];
for (int j = ymin; j <= ymax; ++j) {
for (int i = xmin; i <= xmax; ++i) {
int tindx = n * tdimcyx + tc * tdimyx + j * tdimx + i;
prod_sum[tch_off] += gradOutput[tindx] * val2;
}
}
}
__syncthreads();
if(tch_off == 0) {
float reduce_sum = 0;
for(int idx = 0; idx < THREADS_PER_BLOCK; idx++) {
reduce_sum += prod_sum[idx];
}
const int indx1 = n * odimcyx + c * odimyx + (y - pad_size) * odimx + (x - pad_size);
gradInput1[indx1] = reduce_sum / nelems;
}
}
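// Correlation_backward_input2: gradient w.r.t. the second input, mirroring the
// input1 case but reading rInput1 at the negated displacement (x - i2, y - j2) and
// clamping the output window per displacement.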
__global__ void Correlation_backward_input2(int item, float *gradInput2, int nInputChannels, int inputHeight, int inputWidth,
float *gradOutput, int nOutputChannels, int outputHeight, int outputWidth,
float *rInput1,
int pad_size,
int kernel_size,
int max_displacement,
int stride1,
int stride2)
{
// n (batch size), c (num of channels), y (height), x (width)
int n = item;
int y = blockIdx.x * stride1 + pad_size;
int x = blockIdx.y * stride1 + pad_size;
int c = blockIdx.z;
int tch_off = threadIdx.x;
int kernel_rad = (kernel_size - 1) / 2;
int displacement_rad = max_displacement / stride2;
int displacement_size = 2 * displacement_rad + 1;
int pInputWidth = inputWidth + 2 * pad_size;
int pInputHeight = inputHeight + 2 * pad_size;
int pdimyxc = pInputHeight * pInputWidth * nInputChannels;
int pdimxc = pInputWidth * nInputChannels;
int pdimc = nInputChannels;
int tdimcyx = nOutputChannels * outputHeight * outputWidth;
int tdimyx = outputHeight * outputWidth;
int tdimx = outputWidth;
int odimcyx = nInputChannels * inputHeight* inputWidth;
int odimyx = inputHeight * inputWidth;
int odimx = inputWidth;
float nelems = kernel_size * kernel_size * nInputChannels;
__shared__ float prod_sum[THREADS_PER_BLOCK];
prod_sum[tch_off] = 0;
for (int tc = tch_off; tc < nOutputChannels; tc += THREADS_PER_BLOCK) {
int i2 = (tc % displacement_size - displacement_rad) * stride2;
int j2 = (tc / displacement_size - displacement_rad) * stride2;
int xmin = (x - kernel_rad - max_displacement - i2) / stride1;
int ymin = (y - kernel_rad - max_displacement - j2) / stride1;
int xmax = (x + kernel_rad - max_displacement - i2) / stride1;
int ymax = (y + kernel_rad - max_displacement - j2) / stride1;
if (xmax < 0 || ymax < 0 || xmin >= outputWidth || ymin >= outputHeight) {
// assumes gradInput2 is pre-allocated and zero filled
continue;
}
if (xmin > xmax || ymin > ymax) {
// assumes gradInput2 is pre-allocated and zero filled
continue;
}
xmin = max(0,xmin);
xmax = min(outputWidth-1,xmax);
ymin = max(0,ymin);
ymax = min(outputHeight-1,ymax);
int indx1 = n * pdimyxc + (y - j2)* pdimxc + (x - i2) * pdimc + c;
float val1 = rInput1[indx1];
for (int j = ymin; j <= ymax; ++j) {
for (int i = xmin; i <= xmax; ++i) {
int tindx = n * tdimcyx + tc * tdimyx + j * tdimx + i;
prod_sum[tch_off] += gradOutput[tindx] * val1;
}
}
}
__syncthreads();
if(tch_off == 0) {
float reduce_sum = 0;
for(int idx = 0; idx < THREADS_PER_BLOCK; idx++) {
reduce_sum += prod_sum[idx];
}
const int indx2 = n * odimcyx + c * odimyx + (y - pad_size) * odimx + (x - pad_size);
gradInput2[indx2] = reduce_sum / nelems;
}
}
#pragma once
#ifdef __cplusplus
extern "C"
{
#endif
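// Host-side launcher for the forward pass: repacks input1/input2 into the padded
// channels-last buffers rInput1/rInput2, then launches Correlation_forward with one
// block per output pixel. Returns 1 on success, 0 if a CUDA error was recorded.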
int Correlation_forward_cuda_kernel(/*THCudaTensor_data(state, output)*/ float *output,
/*THCudaTensor_size(state, output, 0)*/ int ob,
/*THCudaTensor_size(state, output, 1)*/ int oc,
/*THCudaTensor_size(state, output, 2)*/ int oh,
/*THCudaTensor_size(state, output, 3)*/ int ow,
/*THCudaTensor_stride(state, output, 0)*/ int osb,
/*THCudaTensor_stride(state, output, 1)*/ int osc,
/*THCudaTensor_stride(state, output, 2)*/ int osh,
/*THCudaTensor_stride(state, output, 3)*/ int osw,
/*THCudaTensor_data(state, input1)*/ float *input1,
/*THCudaTensor_size(state, input1, 1)*/ int ic,
/*THCudaTensor_size(state, input1, 2)*/ int ih,
/*THCudaTensor_size(state, input1, 3)*/ int iw,
/*THCudaTensor_stride(state, input1, 0)*/ int isb,
/*THCudaTensor_stride(state, input1, 1)*/ int isc,
/*THCudaTensor_stride(state, input1, 2)*/ int ish,
/*THCudaTensor_stride(state, input1, 3)*/ int isw,
/*THCudaTensor_data(state, input2)*/ float *input2,
/*THCudaTensor_size(state, input2, 1)*/ int gc,
/*THCudaTensor_stride(state, input2, 0)*/ int gsb,
/*THCudaTensor_stride(state, input2, 1)*/ int gsc,
/*THCudaTensor_stride(state, input2, 2)*/ int gsh,
/*THCudaTensor_stride(state, input2, 3)*/ int gsw,
/*THCudaTensor_data(state, rInput1)*/ float *rInput1,
/*THCudaTensor_data(state, rInput2)*/ float *rInput2,
int pad_size,
int kernel_size,
int max_displacement,
int stride1,
int stride2,
int corr_type_multiply,
/*THCState_getCurrentStream(state)*/ cudaStream_t stream)
{
int batchSize = ob;
int nInputChannels = ic;
int inputWidth = iw;
int inputHeight = ih;
int nOutputChannels = oc;
int outputWidth = ow;
int outputHeight = oh;
dim3 blocks_grid(batchSize, inputHeight, inputWidth);
dim3 threads_block(THREADS_PER_BLOCK);
channels_first<<<blocks_grid,threads_block, 0, stream>>> (input1,rInput1, nInputChannels, inputHeight, inputWidth,pad_size);
channels_first<<<blocks_grid,threads_block, 0, stream>>> (input2,rInput2, nInputChannels, inputHeight, inputWidth, pad_size);
dim3 threadsPerBlock(THREADS_PER_BLOCK);
dim3 totalBlocksCorr(batchSize, outputHeight, outputWidth);
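// One output channel per sampled displacement: nOutputChannels is expected to equal
// (2 * (max_displacement / stride2) + 1)^2.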
Correlation_forward <<< totalBlocksCorr, threadsPerBlock, 0, stream >>>
(output, nOutputChannels, outputHeight, outputWidth,
rInput1, nInputChannels, inputHeight, inputWidth,
rInput2,
pad_size,
kernel_size,
max_displacement,
stride1,
stride2);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in Correlation_forward_cuda_kernel: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
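// Host-side launcher for the backward pass: repacks both inputs to padded
// channels-last layout, then accumulates the gradients w.r.t. input1 and input2
// one batch item at a time.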
int Correlation_backward_cuda_kernel(
/*THCudaTensor_data(state, gradOutput)*/ float *gradOutput,
/*THCudaTensor_size(state, gradOutput, 0)*/ int gob,
/*THCudaTensor_size(state, gradOutput, 1)*/ int goc,
/*THCudaTensor_size(state, gradOutput, 2)*/ int goh,
/*THCudaTensor_size(state, gradOutput, 3)*/ int gow,
/*THCudaTensor_stride(state, gradOutput, 0)*/ int gosb,
/*THCudaTensor_stride(state, gradOutput, 1)*/ int gosc,
/*THCudaTensor_stride(state, gradOutput, 2)*/ int gosh,
/*THCudaTensor_stride(state, gradOutput, 3)*/ int gosw,
/*THCudaTensor_data(state, input1)*/ float* input1,
/*THCudaTensor_size(state, input1, 1)*/ int ic,
/*THCudaTensor_size(state, input1, 2)*/ int ih,
/*THCudaTensor_size(state, input1, 3)*/ int iw,
/*THCudaTensor_stride(state, input1, 0)*/ int isb,
/*THCudaTensor_stride(state, input1, 1)*/ int isc,
/*THCudaTensor_stride(state, input1, 2)*/ int ish,
/*THCudaTensor_stride(state, input1, 3)*/ int isw,
/*THCudaTensor_data(state, input2)*/ float *input2,
/*THCudaTensor_stride(state, input2, 0)*/ int gsb,
/*THCudaTensor_stride(state, input2, 1)*/ int gsc,
/*THCudaTensor_stride(state, input2, 2)*/ int gsh,
/*THCudaTensor_stride(state, input2, 3)*/ int gsw,
/*THCudaTensor_data(state, gradInput1)*/ float *gradInput1,
/*THCudaTensor_stride(state, gradInput1, 0)*/ int gisb,
/*THCudaTensor_stride(state, gradInput1, 1)*/ int gisc,
/*THCudaTensor_stride(state, gradInput1, 2)*/ int gish,
/*THCudaTensor_stride(state, gradInput1, 3)*/ int gisw,
/*THCudaTensor_data(state, gradInput2)*/ float *gradInput2,
/*THCudaTensor_size(state, gradInput2, 1)*/ int ggc,
/*THCudaTensor_stride(state, gradInput2, 0)*/ int ggsb,
/*THCudaTensor_stride(state, gradInput2, 1)*/ int ggsc,
/*THCudaTensor_stride(state, gradInput2, 2)*/ int ggsh,
/*THCudaTensor_stride(state, gradInput2, 3)*/ int ggsw,
/*THCudaTensor_data(state, rInput1)*/ float *rInput1,
/*THCudaTensor_data(state, rInput2)*/ float *rInput2,
int pad_size,
int kernel_size,
int max_displacement,
int stride1,
int stride2,
int corr_type_multiply,
/*THCState_getCurrentStream(state)*/cudaStream_t stream)
{
int batchSize = gob;
int num = batchSize;
int nInputChannels = ic;
int inputWidth = iw;
int inputHeight = ih;
int nOutputChannels = goc;
int outputWidth = gow;
int outputHeight = goh;
dim3 blocks_grid(batchSize, inputHeight, inputWidth);
dim3 threads_block(THREADS_PER_BLOCK);
channels_first<<<blocks_grid,threads_block, 0, stream>>> (input1, rInput1, nInputChannels,inputHeight, inputWidth, pad_size);
channels_first<<<blocks_grid,threads_block, 0, stream>>> (input2, rInput2, nInputChannels, inputHeight, inputWidth, pad_size);
dim3 threadsPerBlock(THREADS_PER_BLOCK);
dim3 totalBlocksCorr(inputHeight, inputWidth, nInputChannels);
for (int n = 0; n < num; ++n) {
Correlation_backward_input1<<<totalBlocksCorr, threadsPerBlock, 0, stream>>> (
n, gradInput1, nInputChannels, inputHeight, inputWidth,
gradOutput, nOutputChannels, outputHeight, outputWidth,
rInput2,
pad_size,
kernel_size,
max_displacement,
stride1,
stride2);
}
for(int n = 0; n < batchSize; n++) {
Correlation_backward_input2<<<totalBlocksCorr, threadsPerBlock, 0, stream>>>(
n, gradInput2, nInputChannels, inputHeight, inputWidth,
gradOutput, nOutputChannels, outputHeight, outputWidth,
rInput1,
pad_size,
kernel_size,
max_displacement,
stride1,
stride2);
}
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in Correlation_backward_cuda_kernel: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
#ifdef __cplusplus
}
#endif