hip_filename | hip_content | cuda_filename | cuda_content
---|---|---|---|
f77ab786d172d232581378869fe586eeeb7c2e71.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <string>
#include <iostream>
#include <algorithm>
#include <sstream>
#include <ctime>
#include <signal.h>
#include <unistd.h>
#include <stdio.h>
#include <fstream>
#include <stdlib.h>
#include <math.h>
#include <unordered_set>
#include <cstring> // memset is used in trainGPU below
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
using namespace std;
struct Edge;
struct Vertex;
struct Layer;
int random(int min, int max);
float average(float average, float dataPoint);
void setupListeners();
template<typename T>
inline void removeFromVector(vector<T> & v, const T & item);
struct Edge {
float weight;
float change;
Vertex* from;
Vertex* to;
};
struct Vertex {
vector<Edge*> inputs;
vector<Edge*> outputs;
float output;
float bias;
int layer;
bool isOutput;
int index;
float delta;
float error;
};
struct Layer {
vector<Vertex*> nodes;
vector<Edge*> edges;
int* edgeNodeMapping;
};
class NN {
public:
NN(bool useGPU, int input, vector<int> hidden, int output);
vector<Layer> layers;
vector<Vertex*> nodes;
vector<Edge*> edges;
int inputSize;
int outputSize;
int outputIdx;
bool useGPU;
int nodeIdx;
Vertex* addVertex(std::vector<Vertex*> inputs, std::vector<Vertex*> outputs);
Vertex* addVertex(int inputCount, int outputCount, int layer, bool useBasicLayering);
Vertex* addVertex(int inputs, int outputs, bool isInput, bool isOutput, int layer, bool useStrictLayers);
void removeVertex();
Edge* addEdge(Vertex* from, Vertex* to, bool addLayer);
Edge* addEdge(Vertex* from, Vertex* to);
void removeEdge(Edge* edge);
int findLayer(Vertex* vertex);
void initRNGCPU();
float trainGPU(vector<float> inputs, vector<float> target);
float trainGPU(vector<float> inputs, vector<float> target, float learningRate, float momentum);
vector<float> runGPULauncher(vector<float>& inputs);
vector<float> runCpu(vector<float>& inputs);
vector<float> run(vector<float>& inputs);
void updateHostMemory();
void updateDeviceMemory();
void print(ostream& output);
double layerDist(double x, int mean);
// CUDA pointers
float* d_weights;
float* d_outputs;
float* d_bias;
int* d_edgeNodeMappingTo;
int* d_edgeNodeMappingFrom;
float* d_errors;
float* d_deltas;
float* d_changes;
float* d_target;
bool* d_managementThreads;
hiprandState_t* RNGStates;
};
bool userEntryMode = false;
bool running = true;
void my_handler(int s){
printf("Caught signal %d. Entering user input mode.\n",s);
if (userEntryMode) {
running = false;
}
userEntryMode = true;
setupListeners();
}
void setupListeners() {
struct sigaction sigIntHandler;
sigIntHandler.sa_handler = my_handler;
sigemptyset(&sigIntHandler.sa_mask);
sigIntHandler.sa_flags = 0;
sigaction(SIGINT, &sigIntHandler, NULL);
}
/*int test(bool useGPU, vector<vector<float>> inputs, vector<vector<float>> targets, vector<int> hidden) {
int inputSize = inputs[0].size();
int outputSize = targets[0].size();
srand(0);
setupListeners();
NN nn(useGPU, inputSize, hidden, outputSize);
//ofstream outputFile(string(useGPU?"gpu":"cpu") + ".graph", ofstream::out);
//nn.print(outputFile);
clock_t begin = clock();
vector<float> target(outputSize);
vector<float> input(inputSize);
float error = 1.0;
do {
int index = random(0, inputSize);
if (userEntryMode) {
std::cout << "\n";
cin >> iInput;
std::cout << "\n";
}
} while(error > 0 && running);
}*/
int test(bool useGPU, int inputSize, vector<int> hidden, int outputSize) {
srand(0);
setupListeners();
NN nn(useGPU, inputSize, hidden, outputSize);
ofstream outputFile(string(useGPU?"gpu":"cpu") + ".graph", ofstream::out);
nn.print(outputFile);
clock_t begin = clock();
vector<float> target(outputSize);
vector<float> input(inputSize);
float error = 1.0;
do {
int iInput = random(100, 1000);
if (userEntryMode) {
std::cout << "\n";
cin >> iInput;
std::cout << "\n" << iInput << "\n";
}
int n = iInput;
int i = 0;
while (n) {
input[i++] = (n % 10)/10.0;
n /= 10;
}
//input[0] = iInput/1000.0;
target[0] = iInput%2==0?1.0:0.0;
//target[0] = (iInput>500?1.0:0.0);
std::cout << "Expected: " << target[0] << ", Input: " << iInput << ", ";
if (userEntryMode) {
float result = nn.runGPULauncher(input)[0];
cout << "Output: " << result << "\n";
}else{
error = average(error, nn.trainGPU(input, target, error*2, error));
std::cout << "\rError: " << error;
}
} while(error > 0.005 && running);
clock_t end = clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
cout << "Time ms: " << elapsed_secs*1000 << "\n";
return 0;
}
int main(int argc, char** argv) {
vector<int> hiddenSizes;
hiddenSizes.push_back(5);
hiddenSizes.push_back(10);
hiddenSizes.push_back(50);
hiddenSizes.push_back(200);
hiddenSizes.push_back(50);
hiddenSizes.push_back(10);
//hiddenSizes.push_back(1);
for (int i=0; i<1; i++) {
test(true, 3, hiddenSizes, 1);
}
cout << "--------------------------\n";
/*for (int i=0; i<1; i++) {
test(false, 1, hiddenSizes, 1);
}*/
}
// layers: [inputSize, hiddenSize, outputSize]
NN::NN(bool useGPU, int input, vector<int> hidden, int output) {
// Uncomment for release builds to seed the RNG with the current time
//srand(time(NULL));
inputSize = 0;
outputSize = 0;
outputIdx = 0;
nodeIdx = 0;
this->useGPU = useGPU;
int nodes = 0;
for (int i=0; i<input; ++i) {
addVertex(vector<Vertex*>(), vector<Vertex*>());
//addVertex(0, 0, true, false, 0, false);
nodes++;
inputSize++;
}
for (int i=0; i<hidden.size(); ++i) {
int prevLayerNodes = nodes;
for (int j=0; j<hidden[i]; ++j) {
addVertex(random(1, prevLayerNodes), 0, i+1, false);
//addVertex(random(1, prevLayerNodes), 0, false, false, i+1, false);
nodes++;
}
}
for (int i=0; i<output; ++i) {
addVertex(random(1, nodes), 0, hidden.size()+1, false);
//addVertex(random(1, nodes), 0, false, true, 2, false);
outputSize++;
}
/*for (int i=0; i<layers.size()-1; ++i) {
for (int j=0; j<layers[i].nodes.size(); ++j) {
Vertex* vertex = layers[i].nodes[j];
if (vertex->outputs.size() == 0) {
Layer& outputLayer = layers[i+1];
Vertex* outputVertex = outputLayer.nodes[
random(0,
outputLayer.nodes.size()-1
)
];
addEdge(vertex, outputVertex, true);
}
}
}*/
if (useGPU) {
updateDeviceMemory();
}
}
double NN::layerDist(double x, int mean) {
return -1*pow(2*x-mean,2)+mean;
}
void NN::updateDeviceMemory() {
initRNGCPU();
int edgeCount = edges.size();
int nodeCount = nodes.size();
size_t floatEdge = sizeof(float)*edgeCount;
size_t floatNode = sizeof(float)*nodeCount;
size_t intEdge = sizeof(int)*edgeCount;
// TODO Free previous device memory
// Malloc device memory
hipMalloc(&d_weights, floatEdge);
hipMalloc(&d_outputs, floatNode);
hipMalloc(&d_bias, floatNode);
hipMalloc(&d_edgeNodeMappingTo, intEdge);
hipMalloc(&d_edgeNodeMappingFrom, intEdge);
hipMalloc(&d_errors, floatNode);
hipMalloc(&d_deltas, floatNode);
hipMalloc(&d_changes, floatEdge);
hipMalloc(&d_target, sizeof(float)*outputSize);
hipMalloc(&d_managementThreads, sizeof(bool)*edgeCount);
hipMemset(d_outputs, 0, floatNode);
hipMemset(d_changes, 0, floatEdge);
float* weights = (float*) malloc(floatEdge);
float* bias = (float*) malloc(floatNode);
int* edgeNodeMappingTo = (int*) malloc(intEdge);
int* edgeNodeMappingFrom = (int*) malloc(intEdge);
float* errors = (float*) malloc(floatNode);
float* deltas = (float*) malloc(floatNode);
float* changes = (float*) malloc(floatEdge);
bool* managementThreads = (bool*) malloc(sizeof(bool)*edgeCount);
int currEdge = 0;
int currNode = 0;
for (int i=0; i<layers.size(); ++i) {
int edgesInLayer = 0;
for (int j=0; j<layers[i].nodes.size(); ++j) {
Vertex* node = layers[i].nodes[j];
bias[currNode] = node->bias;
errors[currNode] = node->error;
deltas[currNode] = node->delta;
for (int k=0; k<node->inputs.size(); ++k) {
edgesInLayer++;
Edge* edge = node->inputs[k];
weights[currEdge] = edge->weight;
changes[currEdge] = edge->change;
managementThreads[currEdge] = (k==0);
edgeNodeMappingTo[currEdge] = currNode;
edgeNodeMappingFrom[currEdge] = edge->from->index;
currEdge++;
}
int inputCount = node->inputs.size();
node->index = currNode;
currNode++;
}
cout << "Edges In Layer " << i << " = " << edgesInLayer << "\n";
}
for (int i=0; i<edgeCount; ++i) {
if (edgeNodeMappingTo[i] < 0 || edgeNodeMappingTo[i]>nodeCount)
cout << "edgeNodeMappingTo: [" << i << "]: " << edgeNodeMappingTo[i] << "\n";
if (edgeNodeMappingFrom[i] < 0 || edgeNodeMappingFrom[i]>nodeCount)
cout << "edgeNodeMappingFrom: [" << i << "]: " << edgeNodeMappingFrom[i] << "\n";
if (weights[i] < 0 || weights[i]>1)
cout << "weights: [" << i << "]: " << weights[i] << "\n";
}
for (int i=0; i<edgeCount; ++i) {
if (edgeNodeMappingTo[i] < 0 || edgeNodeMappingTo[i]>nodeCount)
cout << "edgeNodeMappingTo: [" << i << "]: " << edgeNodeMappingTo[i] << "\n";
}
hipMemcpy(d_weights, weights, floatEdge, hipMemcpyHostToDevice);
hipMemcpy(d_bias, bias, floatNode, hipMemcpyHostToDevice);
hipMemcpy(d_edgeNodeMappingTo, edgeNodeMappingTo, intEdge, hipMemcpyHostToDevice);
hipMemcpy(d_edgeNodeMappingFrom, edgeNodeMappingFrom, intEdge, hipMemcpyHostToDevice);
// Copy the per-node error/delta buffers and the per-edge change buffer to the device
hipMemcpy(d_errors, errors, floatNode, hipMemcpyHostToDevice);
hipMemcpy(d_deltas, deltas, floatNode, hipMemcpyHostToDevice);
hipMemcpy(d_changes, changes, floatEdge, hipMemcpyHostToDevice);
hipMemcpy(d_managementThreads, managementThreads, sizeof(bool)*edgeCount, hipMemcpyHostToDevice);
free(weights);
free(bias);
free(edgeNodeMappingTo);
free(edgeNodeMappingFrom);
free(errors);
free(deltas);
free(changes);
free(managementThreads);
}
void NN::updateHostMemory() {
int edgeCount = edges.size();
int nodeCount = nodes.size();
size_t floatEdge = sizeof(float)*edgeCount;
size_t floatNode = sizeof(float)*nodeCount;
size_t intEdge = sizeof(int)*edgeCount;
//size_t intNode = sizeof(int)*nodeCount;
float* weights = (float*) malloc(floatEdge);
float* bias = (float*) malloc(floatNode);
int* edgeNodeMappingTo = (int*) malloc(intEdge);
int* edgeNodeMappingFrom = (int*) malloc(intEdge);
float* errors = (float*) malloc(floatNode);
float* deltas = (float*) malloc(floatNode);
float* changes = (float*) malloc(floatEdge);
hipMemcpy(weights, d_weights, floatEdge, hipMemcpyDeviceToHost);
hipMemcpy(bias, d_bias, floatNode, hipMemcpyDeviceToHost);
hipMemcpy(edgeNodeMappingTo, d_edgeNodeMappingTo, intEdge, hipMemcpyDeviceToHost);
hipMemcpy(edgeNodeMappingFrom, d_edgeNodeMappingFrom, intEdge, hipMemcpyDeviceToHost);
hipMemcpy(errors, d_errors, floatNode, hipMemcpyDeviceToHost);
hipMemcpy(deltas, d_deltas, floatNode, hipMemcpyDeviceToHost);
hipMemcpy(changes, d_changes, floatEdge, hipMemcpyDeviceToHost);
unordered_set<int> nodesCreated;
vector<Vertex*> emptyEdgeList;
for (int i=0; i<edgeCount; ++i) {
int nodeTo = edgeNodeMappingTo[i];
int nodeFrom = edgeNodeMappingFrom[i];
int nodeToCreate = -1;
if (!nodesCreated.count(nodeTo)) {
nodeToCreate = nodeTo;
}else if (!nodesCreated.count(nodeFrom)) {
nodeToCreate = nodeFrom;
}
if (nodeToCreate != -1) {
nodesCreated.insert(nodeToCreate);
Vertex* vertex = addVertex(emptyEdgeList, emptyEdgeList);
vertex->delta = deltas[nodeToCreate];
vertex->error = errors[nodeToCreate];
//vertex->change = changes[nodeToCreate];
vertex->bias = bias[nodeToCreate];
}
}
free(weights);
free(bias);
free(edgeNodeMappingTo);
free(edgeNodeMappingFrom);
free(errors);
free(deltas);
free(changes);
}
vector<float> NN::run(vector<float>& inputs) {
if (useGPU) {
return runGPULauncher(inputs);
}else{
return runCpu(inputs);
}
}
__global__ void initRNG(unsigned int seed, hiprandState_t* states) {
/* we have to initialize the state */
hiprand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
blockIdx.x, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason) - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[blockIdx.x]);
}
void NN::initRNGCPU() {
int N = edges.size()/512+1;
hipMalloc((void**) &RNGStates, N * sizeof(hiprandState_t));
hipLaunchKernelGGL(( initRNG), dim3(N), dim3(1), 0, 0, /*time(0)*/0, RNGStates);
}
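// Forward pass over one layer's edges: each thread handles a single edge and
// atomically adds weights[id] * outputs[from] into the destination node's running sum.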
__global__ void runGPU(float* weights, int* edgeNodeMappingTo, int* edgeNodeMappingFrom, float* outputs, float* bias, bool* managementThreads, int offset, int n) {
int id = blockIdx.x*blockDim.x+threadIdx.x + offset;
if (id < n) {
int nodeTo = edgeNodeMappingTo[id];
int nodeFrom = edgeNodeMappingFrom[id];
atomicAdd(&outputs[nodeTo], outputs[nodeFrom] * weights[id]);
/*if (managementThreads[id]) {
outputs[nodeTo] = 1/(1+exp(-1*(outputs[nodeTo]+bias[nodeTo])));
}*/
}
}
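// Per-node activation step: once a node's weighted inputs are accumulated,
// apply the logistic function 1/(1+exp(-(sum+bias))).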
__global__ void runGPUPost(float* outputs, float* bias, int offset, int n) {
int id = blockIdx.x*blockDim.x+threadIdx.x + offset;
if (id < n) {
outputs[id] = 1/(1+exp(-1*(outputs[id]+bias[id])));
}
}
__global__ void runGPUProb(float* weights, int* edgeNodeMappingTo, int* edgeNodeMappingFrom, float* outputs, float* bias, int offset, int n, hiprandState_t* states) {
int id = blockIdx.x*blockDim.x+threadIdx.x + offset;
if (id < n) {
int nodeTo = edgeNodeMappingTo[id];
int nodeFrom = edgeNodeMappingFrom[id];
if (hiprand_uniform(&states[blockIdx.x]) >= weights[id]) {
atomicAdd(&outputs[nodeTo], outputs[nodeFrom]);
}
outputs[nodeTo] = 1/(1+exp(-1*(outputs[nodeTo]+bias[nodeTo])));
}
}
__global__ void runGPUMultiplex(bool* activeEdges, float* weights, int* edgeNodeMappingTo, int* edgeNodeMappingFrom, float* outputs, float* bias, int offset, int n) {
int id = blockIdx.x*blockDim.x+threadIdx.x + offset;
if (id < n) {
int nodeTo = edgeNodeMappingTo[id];
int nodeFrom = edgeNodeMappingFrom[id];
if (activeEdges[id]) {
atomicAdd(&outputs[nodeTo], outputs[nodeFrom] * weights[id]);
}
outputs[nodeTo] = 1/(1+exp(-1*(outputs[nodeTo]+bias[nodeTo])));
//outputs[nodeTo]
}
}
/*
learningRate
momentum
target: Node-wise
weights: Edge-wise
outputs: Node-wise
edgeNodeMappingFrom: Edge-wise
edgeNodeMappingTo: Edge-wise
nodeRunCount: Node-wise
initialNodeRunCount: Node-wise
errors: Node-wise
deltas: Node-wise
bias: Node-wise
changes: Edge-wise
n
*/
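// Backward pass over one layer's edges: propagate the destination node's delta
// back into the source node's error, recompute the source delta with the sigmoid
// derivative output*(1-output), and apply a momentum-smoothed weight update.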
__global__ void learnGPU(float learningRate,
float momentum,
float* weights,
float* outputs,
int* edgeNodeMappingFrom,
int* edgeNodeMappingTo,
float* errors,
float* deltas,
float* bias,
float* changes,
int offset,
int n,
float* buffer) {
int id = blockIdx.x*blockDim.x+threadIdx.x+offset;
if (id < offset+n && id>=0) {
int nodeTo = edgeNodeMappingTo[id];
int nodeFrom = edgeNodeMappingFrom[id];
float output = outputs[nodeFrom];
float& weight = weights[id];
float delta = deltas[nodeTo];
atomicAdd(&errors[nodeFrom], delta * weight);
deltas[nodeFrom] = errors[nodeFrom] * output * (1-output);
//atomicAdd(&nodeRunCount[nodeFrom], -1);
//if (nodeRunCount[nodeFrom] == 0) {
//bias[nodeTo] += learningRate * delta;
// nodeRunCount[nodeFrom] = initialNodeRunCount[nodeFrom];
//}
float& change = changes[id];
change = (learningRate * delta * output)
+ (momentum * change);
weight += change;
}
}
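// Per-node bias step (note: this assigns learningRate*delta rather than
// accumulating it into the existing bias).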
__global__ void learnGPUPost(float* deltas, float* bias, float learningRate, int offset, int n) {
int id = blockIdx.x*blockDim.x+threadIdx.x+offset;
if (id < offset+n && id>=0) {
bias[id] = learningRate * deltas[id];
}
}
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
float NN::trainGPU(vector<float> inputs, vector<float> target) {
return trainGPU(inputs, target, 0.3, 0.1);
}
float NN::trainGPU(vector<float> inputs, vector<float> target, float learningRate, float momentum) {
vector<float> results = runGPULauncher(inputs);
int nodeSize = nodes.size();
int outputSize = target.size();
float errors[nodeSize];
float deltas[nodeSize];
memset(errors, 0, sizeof errors);
memset(deltas, 0, sizeof deltas);
float errorSum = 0;
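// Output-layer terms: error = target - output, delta = error * output * (1 - output).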
for (int i=1; i<=outputSize; ++i) {
//cout << "output[1] = " << results[results.size()-i] << ", output[2] = " << std::round(results[results.size()-i]) << "\n";
float output = results[results.size()-i];
std::cout << "Output: " << output << "\n";
errors[nodeSize-i] = target[outputSize-i] - output;
deltas[nodeSize-i] = errors[nodeSize-i] * output * (1-output);
errorSum += errors[nodeSize-i];
}
float error = abs(errorSum/outputSize);
//std::cout << "Error: " << error << "\n";
hipMemcpy(d_errors, &errors[0], nodeSize*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_deltas, &deltas[0], nodeSize*sizeof(float), hipMemcpyHostToDevice);
int offset = edges.size();
int nodesOffset = nodes.size();
float* d_buffer;
//hipMalloc(&d_buffer, offset*sizeof(float));
for (int i=layers.size()-1; i>0; --i) {
int N = layers[i].edges.size();
int nodesN = layers[i].nodes.size();
offset -= N;
nodesOffset -= nodesN;
//std::cout << "N["<<i<<"]: "<<N<<", offset["<<i<<"]: " << offset << "\n";
int edgeCount = layers[i].edges.size();
int nodeCount = layers[i].nodes.size();
int blockSize = 512;// or 64?
int gridSize = edgeCount/blockSize + 1;
hipLaunchKernelGGL(( learnGPU), dim3(gridSize), dim3(blockSize), 0, 0, learningRate,
momentum,
d_weights,
d_outputs,
d_edgeNodeMappingFrom,
d_edgeNodeMappingTo,
d_errors,
d_deltas,
d_bias,
d_changes,
offset,
N,
d_buffer);
blockSize = 512;// or 64?
gridSize = nodeCount/blockSize + 1;
hipLaunchKernelGGL(( learnGPUPost), dim3(gridSize), dim3(blockSize), 0, 0, d_deltas, d_bias, learningRate, nodesOffset, nodesN);
//cudaCheckErrors("kernel");
}
/*float* buffer = (float*) (sizeof(float)*edges.size());
hipMemcpy(buffer, d_buffer, edges.size()*sizeof(float), hipMemcpyDeviceToHost);
for (int i=0; i<edges.size(); ++i) {
std::cout << "buffer["<<i<<"] = " << buffer[i] << "\n";
}*/
return error;
}
vector<float> NN::runGPULauncher(vector<float>& inputs) {
clock_t begin = clock();
//cout << "Time ms: " << ((clock() - begin)/(double)CLOCKS_PER_SEC)*1000 << "\n";
for (int i=inputs.size(); i<nodes.size(); ++i) {
inputs.push_back(0);
}
hipMemcpy(d_outputs, &inputs[0], inputs.size()*sizeof(float), hipMemcpyHostToDevice);
//cudaCheckErrors("copy");
//cout << "Time Post Copy: " << ((clock() - begin)/(double)CLOCKS_PER_SEC)*1000 << "\n";
int offset = 0;
int nodeOffset = layers[0].nodes.size();
//cout << "\nnodeCount: " << layers[0].nodes.size() << "\n";
for (int i=1; i<layers.size(); ++i) {
int edgeCount = layers[i].edges.size();
int nodeCount = layers[i].nodes.size();
//cout << "nodeCount: " << nodeCount << "\n";
int gridSize, blockSize;
blockSize = 512;// or 64?
gridSize = edgeCount/blockSize + 1;
hipLaunchKernelGGL(( runGPU), dim3(gridSize), dim3(blockSize), 0, 0, d_weights, d_edgeNodeMappingTo, d_edgeNodeMappingFrom, d_outputs, d_bias, d_managementThreads, offset, offset+edgeCount);
//runGPUProb<<<gridSize, blockSize>>>(d_weights, d_edgeNodeMappingTo, d_edgeNodeMappingFrom, d_outputs, d_bias, offset, offset+edgeCount, RNGStates);
blockSize = 512;// or 64?
gridSize = nodeCount/blockSize + 1;
hipLaunchKernelGGL(( runGPUPost), dim3(gridSize), dim3(blockSize), 0, 0, d_outputs, d_bias, nodeOffset, nodeOffset+nodeCount);
//cout << "Time Post Kernel " << i << ": " << ((clock() - begin)/(double)CLOCKS_PER_SEC)*1000 << "\n";
//cudaCheckErrors("kernel");
offset += edgeCount;
nodeOffset += nodeCount;
}
int outputLayerSize = layers[layers.size()-1].nodes.size();
//cout << "\nOutput Size = " << outputLayerSize << "\n";
float* outputs = (float*) malloc( sizeof(float)*outputLayerSize );
//float* weights = (float*) malloc( sizeof(float)*edges.size() );
hipMemcpy(outputs, d_outputs+(nodes.size()-outputLayerSize), outputLayerSize*sizeof(float), hipMemcpyDeviceToHost);
//hipMemcpy(weights, d_weights, edges.size()*sizeof(float), hipMemcpyDeviceToHost);
for (int i=0; i<edges.size(); ++i) {
// std::cout << "Weight["<<i<<"] = " << weights[i] << "\n";
}
//cout << "Time post copy output: " << ((clock() - begin)/(double)CLOCKS_PER_SEC)*1000 << "\n";
vector<float> result(outputs, outputs + outputLayerSize);
free(outputs);
return result;
}
vector<float> NN::runCpu(vector<float>& inputs) {
vector<float> result;
while (inputs.size() < layers[0].nodes.size()) {
inputs.push_back(0);
}
// Stage input vertices
for (int i=0; i<inputs.size(); ++i) {
layers[0].nodes[i]->output = inputs[i];
result.push_back(inputs[i]);
}
int layerSize = layers.size();
// Forward propagate each layer
for (int i=1; i<layerSize; ++i) {
vector<Vertex*> layerNodes = layers[i].nodes;
for (int j=0; j<layerNodes.size(); ++j) {
Vertex* node = layerNodes[j];
float sum = node->bias;
for (int inputIdx=0; inputIdx < node->inputs.size(); ++inputIdx) {
Edge* inputEdge = node->inputs[inputIdx];
sum += inputEdge->weight * inputEdge->from->output;
}
float outputValue = 1/(1+exp(-1*sum));
node->output = outputValue;
if (i==outputIdx) {
result.push_back(outputValue);
}
}
}
return result;
}
void NN::print(ostream& output) {
// output << "View at: http://www.webgraphviz.com/\n";
output << "digraph G {\n";
stringstream edges;
for (int i=0; i<layers.size(); ++i) {
output << "\tsubgraph cluster_" << i << " {\n"
<< "\t\tstyle=filled;\n"
<< "\t\tcolor=lightgrey;\n"
<< "\t\tnode [style=filled,color=white];\n";
for (int j=0; j<layers[i].nodes.size(); ++j) {
output << "\t\t\"" << layers[i].nodes[j]->index << "\"\n";
for (int k=0; k<layers[i].nodes[j]->outputs.size(); k++) {
edges << "\t\"" << layers[i].nodes[j]->index << "\" -> \"" << layers[i].nodes[j]->outputs[k]->to->index << "\";\n";
}
}
output << "\t\tlabel = \"layer #" << i << "\";\n";
output << "\t}\n";
}
output << edges.str();
output << "}\n";
}
Vertex* NN::addVertex(std::vector<Vertex*> inputs, std::vector<Vertex*> outputs) {
Vertex* vertex = new Vertex;
vertex->index = nodeIdx++;
vertex->bias = 0.0;
vertex->error = 0.0;
vertex->delta = 0.0;
vertex->output = 0.0;
vector<Edge*> edges;
for (int i=0; i<inputs.size(); ++i) {
edges.push_back(addEdge(inputs[i], vertex));
}
for (int i=0; i<outputs.size(); ++i) {
edges.push_back(addEdge(vertex, outputs[i]));
}
nodes.push_back(vertex);
int layerIdx = findLayer(vertex);
if (layerIdx < (int)layers.size()) {
layers[layerIdx].nodes.push_back(vertex);
} else {
Layer layer;
layer.nodes.push_back(vertex);
layers.insert(layers.begin()+layerIdx, layer);
}
vertex->layer = layerIdx;
layers[layerIdx].edges.insert(layers[layerIdx].edges.end(), edges.begin(), edges.end());
return vertex;
}
Vertex* NN::addVertex(int inputCount, int outputCount, int layer, bool useBasicLayering) {
vector<Vertex*> inputs;
vector<Vertex*> outputs;
bool useBasicLayeringSave = useBasicLayering;
useBasicLayering = true;
for (int i=0; i<inputCount; ++i) {
Layer* inputLayer = useBasicLayering ? &layers[layer-1] : &layers[random(0,layer-1)];
int inputVertex = random(0, inputLayer->nodes.size()-1);
inputs.push_back(inputLayer->nodes[inputVertex]);
if (i==0) useBasicLayering=useBasicLayeringSave;
}
for (int i=0; i<outputCount; ++i) {
Layer* outputLayer;
if (useBasicLayering) {
outputLayer = &layers[layer+1];
}else{
int outputLayerId = random(0,layer+1);
if (outputLayerId == layer) {
outputLayerId++;
}
outputLayer = &layers[outputLayerId];
}
int outputVertex = random(0, outputLayer->nodes.size()-1);
outputs.push_back(outputLayer->nodes[outputVertex]);
}
return addVertex(inputs, outputs);
}
Vertex* NN::addVertex(int inputs, int outputs, bool isInput, bool isOutput, int layer, bool useStrictLayers) {
Vertex* vertex = new Vertex;
vertex->index = nodeIdx++;
vertex->isOutput = isOutput;
vertex->bias = 0.0;
vertex->error = 0.0;
vertex->delta = 0.0;
vertex->output = 0.0;
vector<Edge*> edges;
inputs = inputs>0?inputs:inputs+1;
int* edgeNodeMapping = (int*) malloc(sizeof(int)*inputs);
if (isInput) {
inputs--;
}
if (layer > 0) {
Layer* inputLayer = &layers[layer-1];
int inputVertex = random(0, inputLayer->nodes.size()-1);
edges.push_back(addEdge(inputLayer->nodes[inputVertex], vertex));
inputs--;
}
for (int i=0; i<inputs; ++i) {
Layer* inputLayer = useStrictLayers ? &layers[layer-1] : &layers[random(0,layer-1)];
int inputVertex = random(0, inputLayer->nodes.size()-1);
edges.push_back(addEdge(inputLayer->nodes[inputVertex], vertex));
}
for (int i=0; i<outputs; ++i) {
int outputVertex = random(inputSize, nodes.size());
}
nodes.push_back(vertex);
int layerIdx = findLayer(vertex);
if (isOutput && outputIdx != 0) {
isOutput = false;
layerIdx = outputIdx;
}
if (layerIdx < (int)layers.size() && !isOutput) {
layers[layerIdx].nodes.push_back(vertex);
} else {
if (isOutput) {
outputIdx = layers.size();
layerIdx = outputIdx;
}
Layer layer;
layer.nodes.push_back(vertex);
layers.insert(layers.begin()+layerIdx, layer);
}
vertex->layer = layerIdx;
layers[layerIdx].edges.insert(layers[layerIdx].edges.end(), edges.begin(), edges.end());
return vertex;
}
// TODO: remove edge from layer
void NN::removeVertex() {
int vertexIdx = random(0, nodes.size());
Vertex* vertex = nodes[vertexIdx];
for (int i=0; i<vertex->inputs.size(); ++i) {
removeEdge(vertex->inputs[i]);
}
for (int i=0; i<vertex->outputs.size(); ++i) {
removeEdge(vertex->outputs[i]);
}
nodes.erase(nodes.begin() + vertexIdx);
}
void NN::removeEdge(Edge* edge) {
removeFromVector(edge->from->outputs, edge);
removeFromVector(edge->to->inputs, edge);
delete edge;
}
Edge* NN::addEdge(Vertex* from, Vertex* to) {
return addEdge(from, to, false);
}
Edge* NN::addEdge(Vertex* from, Vertex* to, bool addLayer) {
Edge *edge = new Edge;
edge->to = to;
edge->from = from;
edge->weight = 0.1;
edge->change = 0.0;
to->inputs.push_back(edge);
from->outputs.push_back(edge);
edges.push_back(edge);
if (addLayer) {
layers[to->layer].edges.push_back(edge);
}
return edge;
}
template<typename T>
inline void removeFromVector(vector<T> & v, const T & item) {
for(typename vector<T>::iterator iter = v.begin(); iter != v.end(); ++iter) {
if(*iter == item) {
v.erase(iter);
break;
}
}
}
int NN::findLayer(Vertex* vertex) {
int maxDepth = -1;
for (int i=0; i<vertex->inputs.size(); ++i) {
maxDepth = max(maxDepth, vertex->inputs[i]->from->layer);
}
vertex->layer = maxDepth+1;
return maxDepth+1;
}
int random(int min, int max) {
return rand()%(max-min + 1) + min;
}
float average(float average, float dataPoint) {
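// Exponential moving average with smoothing factor 1/N (roughly the last N samples).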
static int N = 20;
average -= average / N;
average += dataPoint / N;
return average;
}
| f77ab786d172d232581378869fe586eeeb7c2e71.cu | #include <vector>
#include <string>
#include <iostream>
#include <algorithm>
#include <sstream>
#include <ctime>
#include <signal.h>
#include <unistd.h>
#include <stdio.h>
#include <fstream>
#include <stdlib.h>
#include <math.h>
#include <unordered_set>
#include <cstring> // memset is used in trainGPU below
#include <curand.h>
#include <curand_kernel.h>
using namespace std;
struct Edge;
struct Vertex;
struct Layer;
int random(int min, int max);
float average(float average, float dataPoint);
void setupListeners();
template<typename T>
inline void removeFromVector(vector<T> & v, const T & item);
struct Edge {
float weight;
float change;
Vertex* from;
Vertex* to;
};
struct Vertex {
vector<Edge*> inputs;
vector<Edge*> outputs;
float output;
float bias;
int layer;
bool isOutput;
int index;
float delta;
float error;
};
struct Layer {
vector<Vertex*> nodes;
vector<Edge*> edges;
int* edgeNodeMapping;
};
class NN {
public:
NN(bool useGPU, int input, vector<int> hidden, int output);
vector<Layer> layers;
vector<Vertex*> nodes;
vector<Edge*> edges;
int inputSize;
int outputSize;
int outputIdx;
bool useGPU;
int nodeIdx;
Vertex* addVertex(std::vector<Vertex*> inputs, std::vector<Vertex*> outputs);
Vertex* addVertex(int inputCount, int outputCount, int layer, bool useBasicLayering);
Vertex* addVertex(int inputs, int outputs, bool isInput, bool isOutput, int layer, bool useStrictLayers);
void removeVertex();
Edge* addEdge(Vertex* from, Vertex* to, bool addLayer);
Edge* addEdge(Vertex* from, Vertex* to);
void removeEdge(Edge* edge);
int findLayer(Vertex* vertex);
void initRNGCPU();
float trainGPU(vector<float> inputs, vector<float> target);
float trainGPU(vector<float> inputs, vector<float> target, float learningRate, float momentum);
vector<float> runGPULauncher(vector<float>& inputs);
vector<float> runCpu(vector<float>& inputs);
vector<float> run(vector<float>& inputs);
void updateHostMemory();
void updateDeviceMemory();
void print(ostream& output);
double layerDist(double x, int mean);
// CUDA pointers
float* d_weights;
float* d_outputs;
float* d_bias;
int* d_edgeNodeMappingTo;
int* d_edgeNodeMappingFrom;
float* d_errors;
float* d_deltas;
float* d_changes;
float* d_target;
bool* d_managementThreads;
curandState_t* RNGStates;
};
bool userEntryMode = false;
bool running = true;
void my_handler(int s){
printf("Caught signal %d. Entering user input mode.\n",s);
if (userEntryMode) {
running = false;
}
userEntryMode = true;
setupListeners();
}
void setupListeners() {
struct sigaction sigIntHandler;
sigIntHandler.sa_handler = my_handler;
sigemptyset(&sigIntHandler.sa_mask);
sigIntHandler.sa_flags = 0;
sigaction(SIGINT, &sigIntHandler, NULL);
}
/*int test(bool useGPU, vector<vector<float>> inputs, vector<vector<float>> targets, vector<int> hidden) {
int inputSize = inputs[0].size();
int outputSize = targets[0].size();
srand(0);
setupListeners();
NN nn(useGPU, inputSize, hidden, outputSize);
//ofstream outputFile(string(useGPU?"gpu":"cpu") + ".graph", ofstream::out);
//nn.print(outputFile);
clock_t begin = clock();
vector<float> target(outputSize);
vector<float> input(inputSize);
float error = 1.0;
do {
int index = random(0, inputSize);
if (userEntryMode) {
std::cout << "\n";
cin >> iInput;
std::cout << "\n";
}
} while(error > 0 && running);
}*/
int test(bool useGPU, int inputSize, vector<int> hidden, int outputSize) {
srand(0);
setupListeners();
NN nn(useGPU, inputSize, hidden, outputSize);
ofstream outputFile(string(useGPU?"gpu":"cpu") + ".graph", ofstream::out);
nn.print(outputFile);
clock_t begin = clock();
vector<float> target(outputSize);
vector<float> input(inputSize);
float error = 1.0;
do {
int iInput = random(100, 1000);
if (userEntryMode) {
std::cout << "\n";
cin >> iInput;
std::cout << "\n" << iInput << "\n";
}
int n = iInput;
int i = 0;
while (n) {
input[i++] = (n % 10)/10.0;
n /= 10;
}
//input[0] = iInput/1000.0;
target[0] = iInput%2==0?1.0:0.0;
//target[0] = (iInput>500?1.0:0.0);
std::cout << "Expected: " << target[0] << ", Input: " << iInput << ", ";
if (userEntryMode) {
float result = nn.runGPULauncher(input)[0];
cout << "Output: " << result << "\n";
}else{
error = average(error, nn.trainGPU(input, target, error*2, error));
std::cout << "\rError: " << error;
}
} while(error > 0.005 && running);
clock_t end = clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
cout << "Time ms: " << elapsed_secs*1000 << "\n";
return 0;
}
int main(int argc, char** argv) {
vector<int> hiddenSizes;
hiddenSizes.push_back(5);
hiddenSizes.push_back(10);
hiddenSizes.push_back(50);
hiddenSizes.push_back(200);
hiddenSizes.push_back(50);
hiddenSizes.push_back(10);
//hiddenSizes.push_back(1);
for (int i=0; i<1; i++) {
test(true, 3, hiddenSizes, 1);
}
cout << "--------------------------\n";
/*for (int i=0; i<1; i++) {
test(false, 1, hiddenSizes, 1);
}*/
}
// layers: [inputSize, hiddenSize, outputSize]
NN::NN(bool useGPU, int input, vector<int> hidden, int output) {
// Uncomment for release builds to seed the RNG with the current time
//srand(time(NULL));
inputSize = 0;
outputSize = 0;
outputIdx = 0;
nodeIdx = 0;
this->useGPU = useGPU;
int nodes = 0;
for (int i=0; i<input; ++i) {
addVertex(vector<Vertex*>(), vector<Vertex*>());
//addVertex(0, 0, true, false, 0, false);
nodes++;
inputSize++;
}
for (int i=0; i<hidden.size(); ++i) {
int prevLayerNodes = nodes;
for (int j=0; j<hidden[i]; ++j) {
addVertex(random(1, prevLayerNodes), 0, i+1, false);
//addVertex(random(1, prevLayerNodes), 0, false, false, i+1, false);
nodes++;
}
}
for (int i=0; i<output; ++i) {
addVertex(random(1, nodes), 0, hidden.size()+1, false);
//addVertex(random(1, nodes), 0, false, true, 2, false);
outputSize++;
}
/*for (int i=0; i<layers.size()-1; ++i) {
for (int j=0; j<layers[i].nodes.size(); ++j) {
Vertex* vertex = layers[i].nodes[j];
if (vertex->outputs.size() == 0) {
Layer& outputLayer = layers[i+1];
Vertex* outputVertex = outputLayer.nodes[
random(0,
outputLayer.nodes.size()-1
)
];
addEdge(vertex, outputVertex, true);
}
}
}*/
if (useGPU) {
updateDeviceMemory();
}
}
double NN::layerDist(double x, int mean) {
return -1*pow(2*x-mean,2)+mean;
}
void NN::updateDeviceMemory() {
initRNGCPU();
int edgeCount = edges.size();
int nodeCount = nodes.size();
size_t floatEdge = sizeof(float)*edgeCount;
size_t floatNode = sizeof(float)*nodeCount;
size_t intEdge = sizeof(int)*edgeCount;
// TODO Free previous device memory
// Malloc device memory
cudaMalloc(&d_weights, floatEdge);
cudaMalloc(&d_outputs, floatNode);
cudaMalloc(&d_bias, floatNode);
cudaMalloc(&d_edgeNodeMappingTo, intEdge);
cudaMalloc(&d_edgeNodeMappingFrom, intEdge);
cudaMalloc(&d_errors, floatNode);
cudaMalloc(&d_deltas, floatNode);
cudaMalloc(&d_changes, floatEdge);
cudaMalloc(&d_target, sizeof(float)*outputSize);
cudaMalloc(&d_managementThreads, sizeof(bool)*edgeCount);
cudaMemset(d_outputs, 0, floatNode);
cudaMemset(d_changes, 0, floatEdge);
float* weights = (float*) malloc(floatEdge);
float* bias = (float*) malloc(floatNode);
int* edgeNodeMappingTo = (int*) malloc(intEdge);
int* edgeNodeMappingFrom = (int*) malloc(intEdge);
float* errors = (float*) malloc(floatNode);
float* deltas = (float*) malloc(floatNode);
float* changes = (float*) malloc(floatEdge);
bool* managementThreads = (bool*) malloc(sizeof(bool)*edgeCount);
int currEdge = 0;
int currNode = 0;
for (int i=0; i<layers.size(); ++i) {
int edgesInLayer = 0;
for (int j=0; j<layers[i].nodes.size(); ++j) {
Vertex* node = layers[i].nodes[j];
bias[currNode] = node->bias;
errors[currNode] = node->error;
deltas[currNode] = node->delta;
for (int k=0; k<node->inputs.size(); ++k) {
edgesInLayer++;
Edge* edge = node->inputs[k];
weights[currEdge] = edge->weight;
changes[currEdge] = edge->change;
managementThreads[currEdge] = (k==0);
edgeNodeMappingTo[currEdge] = currNode;
edgeNodeMappingFrom[currEdge] = edge->from->index;
currEdge++;
}
int inputCount = node->inputs.size();
node->index = currNode;
currNode++;
}
cout << "Edges In Layer " << i << " = " << edgesInLayer << "\n";
}
for (int i=0; i<edgeCount; ++i) {
if (edgeNodeMappingTo[i] < 0 || edgeNodeMappingTo[i]>nodeCount)
cout << "edgeNodeMappingTo: [" << i << "]: " << edgeNodeMappingTo[i] << "\n";
if (edgeNodeMappingFrom[i] < 0 || edgeNodeMappingFrom[i]>nodeCount)
cout << "edgeNodeMappingFrom: [" << i << "]: " << edgeNodeMappingFrom[i] << "\n";
if (weights[i] < 0 || weights[i]>1)
cout << "weights: [" << i << "]: " << weights[i] << "\n";
}
for (int i=0; i<edgeCount; ++i) {
if (edgeNodeMappingTo[i] < 0 || edgeNodeMappingTo[i]>nodeCount)
cout << "edgeNodeMappingTo: [" << i << "]: " << edgeNodeMappingTo[i] << "\n";
}
cudaMemcpy(d_weights, weights, floatEdge, cudaMemcpyHostToDevice);
cudaMemcpy(d_bias, bias, floatNode, cudaMemcpyHostToDevice);
cudaMemcpy(d_edgeNodeMappingTo, edgeNodeMappingTo, intEdge, cudaMemcpyHostToDevice);
cudaMemcpy(d_edgeNodeMappingFrom, edgeNodeMappingFrom, intEdge, cudaMemcpyHostToDevice);
// Copy the per-node error/delta buffers and the per-edge change buffer to the device
cudaMemcpy(d_errors, errors, floatNode, cudaMemcpyHostToDevice);
cudaMemcpy(d_deltas, deltas, floatNode, cudaMemcpyHostToDevice);
cudaMemcpy(d_changes, changes, floatEdge, cudaMemcpyHostToDevice);
cudaMemcpy(d_managementThreads, managementThreads, sizeof(bool)*edgeCount, cudaMemcpyHostToDevice);
free(weights);
free(bias);
free(edgeNodeMappingTo);
free(edgeNodeMappingFrom);
free(errors);
free(deltas);
free(changes);
free(managementThreads);
}
void NN::updateHostMemory() {
int edgeCount = edges.size();
int nodeCount = nodes.size();
size_t floatEdge = sizeof(float)*edgeCount;
size_t floatNode = sizeof(float)*nodeCount;
size_t intEdge = sizeof(int)*edgeCount;
//size_t intNode = sizeof(int)*nodeCount;
float* weights = (float*) malloc(floatEdge);
float* bias = (float*) malloc(floatNode);
int* edgeNodeMappingTo = (int*) malloc(intEdge);
int* edgeNodeMappingFrom = (int*) malloc(intEdge);
float* errors = (float*) malloc(floatNode);
float* deltas = (float*) malloc(floatNode);
float* changes = (float*) malloc(floatEdge);
cudaMemcpy(weights, d_weights, floatEdge, cudaMemcpyDeviceToHost);
cudaMemcpy(bias, d_bias, floatNode, cudaMemcpyDeviceToHost);
cudaMemcpy(edgeNodeMappingTo, d_edgeNodeMappingTo, intEdge, cudaMemcpyDeviceToHost);
cudaMemcpy(edgeNodeMappingFrom, d_edgeNodeMappingFrom, intEdge, cudaMemcpyDeviceToHost);
cudaMemcpy(errors, d_errors, floatNode, cudaMemcpyDeviceToHost);
cudaMemcpy(deltas, d_deltas, floatNode, cudaMemcpyDeviceToHost);
cudaMemcpy(changes, d_changes, floatEdge, cudaMemcpyDeviceToHost);
unordered_set<int> nodesCreated;
vector<Vertex*> emptyEdgeList;
for (int i=0; i<edgeCount; ++i) {
int nodeTo = edgeNodeMappingTo[i];
int nodeFrom = edgeNodeMappingFrom[i];
int nodeToCreate = -1;
if (!nodesCreated.count(nodeTo)) {
nodeToCreate = nodeTo;
}else if (!nodesCreated.count(nodeFrom)) {
nodeToCreate = nodeFrom;
}
if (nodeToCreate != -1) {
nodesCreated.insert(nodeToCreate);
Vertex* vertex = addVertex(emptyEdgeList, emptyEdgeList);
vertex->delta = deltas[nodeToCreate];
vertex->error = errors[nodeToCreate];
//vertex->change = changes[nodeToCreate];
vertex->bias = bias[nodeToCreate];
}
}
free(weights);
free(bias);
free(edgeNodeMappingTo);
free(edgeNodeMappingFrom);
free(errors);
free(deltas);
free(changes);
}
vector<float> NN::run(vector<float>& inputs) {
if (useGPU) {
return runGPULauncher(inputs);
}else{
return runCpu(inputs);
}
}
__global__ void initRNG(unsigned int seed, curandState_t* states) {
/* we have to initialize the state */
curand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
blockIdx.x, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason) - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[blockIdx.x]);
}
void NN::initRNGCPU() {
int N = edges.size()/512+1;
cudaMalloc((void**) &RNGStates, N * sizeof(curandState_t));
initRNG<<<N, 1>>>(/*time(0)*/0, RNGStates);
}
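// Forward pass over one layer's edges: each thread handles a single edge and
// atomically adds weights[id] * outputs[from] into the destination node's running sum.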
__global__ void runGPU(float* weights, int* edgeNodeMappingTo, int* edgeNodeMappingFrom, float* outputs, float* bias, bool* managementThreads, int offset, int n) {
int id = blockIdx.x*blockDim.x+threadIdx.x + offset;
if (id < n) {
int nodeTo = edgeNodeMappingTo[id];
int nodeFrom = edgeNodeMappingFrom[id];
atomicAdd(&outputs[nodeTo], outputs[nodeFrom] * weights[id]);
/*if (managementThreads[id]) {
outputs[nodeTo] = 1/(1+exp(-1*(outputs[nodeTo]+bias[nodeTo])));
}*/
}
}
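// Per-node activation step: once a node's weighted inputs are accumulated,
// apply the logistic function 1/(1+exp(-(sum+bias))).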
__global__ void runGPUPost(float* outputs, float* bias, int offset, int n) {
int id = blockIdx.x*blockDim.x+threadIdx.x + offset;
if (id < n) {
outputs[id] = 1/(1+exp(-1*(outputs[id]+bias[id])));
}
}
__global__ void runGPUProb(float* weights, int* edgeNodeMappingTo, int* edgeNodeMappingFrom, float* outputs, float* bias, int offset, int n, curandState_t* states) {
int id = blockIdx.x*blockDim.x+threadIdx.x + offset;
if (id < n) {
int nodeTo = edgeNodeMappingTo[id];
int nodeFrom = edgeNodeMappingFrom[id];
if (curand_uniform(&states[blockIdx.x]) >= weights[id]) {
atomicAdd(&outputs[nodeTo], outputs[nodeFrom]);
}
outputs[nodeTo] = 1/(1+exp(-1*(outputs[nodeTo]+bias[nodeTo])));
}
}
__global__ void runGPUMultiplex(bool* activeEdges, float* weights, int* edgeNodeMappingTo, int* edgeNodeMappingFrom, float* outputs, float* bias, int offset, int n) {
int id = blockIdx.x*blockDim.x+threadIdx.x + offset;
if (id < n) {
int nodeTo = edgeNodeMappingTo[id];
int nodeFrom = edgeNodeMappingFrom[id];
if (activeEdges[id]) {
atomicAdd(&outputs[nodeTo], outputs[nodeFrom] * weights[id]);
}
outputs[nodeTo] = 1/(1+exp(-1*(outputs[nodeTo]+bias[nodeTo])));
//outputs[nodeTo]
}
}
/*
learningRate
momentum
target: Node-wise
weights: Edge-wise
outputs: Node-wise
edgeNodeMappingFrom: Edge-wise
edgeNodeMappingTo: Edge-wise
nodeRunCount: Node-wise
initialNodeRunCount: Node-wise
errors: Node-wise
deltas: Node-wise
bias: Node-wise
changes: Edge-wise
n
*/
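// Backward pass over one layer's edges: propagate the destination node's delta
// back into the source node's error, recompute the source delta with the sigmoid
// derivative output*(1-output), and apply a momentum-smoothed weight update.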
__global__ void learnGPU(float learningRate,
float momentum,
float* weights,
float* outputs,
int* edgeNodeMappingFrom,
int* edgeNodeMappingTo,
float* errors,
float* deltas,
float* bias,
float* changes,
int offset,
int n,
float* buffer) {
int id = blockIdx.x*blockDim.x+threadIdx.x+offset;
if (id < offset+n && id>=0) {
int nodeTo = edgeNodeMappingTo[id];
int nodeFrom = edgeNodeMappingFrom[id];
float output = outputs[nodeFrom];
float& weight = weights[id];
float delta = deltas[nodeTo];
atomicAdd(&errors[nodeFrom], delta * weight);
deltas[nodeFrom] = errors[nodeFrom] * output * (1-output);
//atomicAdd(&nodeRunCount[nodeFrom], -1);
//if (nodeRunCount[nodeFrom] == 0) {
//bias[nodeTo] += learningRate * delta;
// nodeRunCount[nodeFrom] = initialNodeRunCount[nodeFrom];
//}
float& change = changes[id];
change = (learningRate * delta * output)
+ (momentum * change);
weight += change;
}
}
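// Per-node bias step (note: this assigns learningRate*delta rather than
// accumulating it into the existing bias).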
__global__ void learnGPUPost(float* deltas, float* bias, float learningRate, int offset, int n) {
int id = blockIdx.x*blockDim.x+threadIdx.x+offset;
if (id < offset+n && id>=0) {
bias[id] = learningRate * deltas[id];
}
}
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
float NN::trainGPU(vector<float> inputs, vector<float> target) {
return trainGPU(inputs, target, 0.3, 0.1);
}
float NN::trainGPU(vector<float> inputs, vector<float> target, float learningRate, float momentum) {
vector<float> results = runGPULauncher(inputs);
int nodeSize = nodes.size();
int outputSize = target.size();
float errors[nodeSize];
float deltas[nodeSize];
memset(errors, 0, sizeof errors);
memset(deltas, 0, sizeof deltas);
float errorSum = 0;
for (int i=1; i<=outputSize; ++i) {
//cout << "output[1] = " << results[results.size()-i] << ", output[2] = " << std::round(results[results.size()-i]) << "\n";
float output = results[results.size()-i];
std::cout << "Output: " << output << "\n";
errors[nodeSize-i] = target[outputSize-i] - output;
deltas[nodeSize-i] = errors[nodeSize-i] * output * (1-output);
errorSum += errors[nodeSize-i];
}
float error = abs(errorSum/outputSize);
//std::cout << "Error: " << error << "\n";
cudaMemcpy(d_errors, &errors[0], nodeSize*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_deltas, &deltas[0], nodeSize*sizeof(float), cudaMemcpyHostToDevice);
int offset = edges.size();
int nodesOffset = nodes.size();
float* d_buffer;
//cudaMalloc(&d_buffer, offset*sizeof(float));
for (int i=layers.size()-1; i>0; --i) {
int N = layers[i].edges.size();
int nodesN = layers[i].nodes.size();
offset -= N;
nodesOffset -= nodesN;
//std::cout << "N["<<i<<"]: "<<N<<", offset["<<i<<"]: " << offset << "\n";
int edgeCount = layers[i].edges.size();
int nodeCount = layers[i].nodes.size();
int blockSize = 512;// or 64?
int gridSize = edgeCount/blockSize + 1;
learnGPU<<<gridSize, blockSize>>>(learningRate,
momentum,
d_weights,
d_outputs,
d_edgeNodeMappingFrom,
d_edgeNodeMappingTo,
d_errors,
d_deltas,
d_bias,
d_changes,
offset,
N,
d_buffer);
blockSize = 512;// or 64?
gridSize = nodeCount/blockSize + 1;
learnGPUPost<<<gridSize, blockSize>>>(d_deltas, d_bias, learningRate, nodesOffset, nodesN);
//cudaCheckErrors("kernel");
}
/*float* buffer = (float*) (sizeof(float)*edges.size());
cudaMemcpy(buffer, d_buffer, edges.size()*sizeof(float), cudaMemcpyDeviceToHost);
for (int i=0; i<edges.size(); ++i) {
std::cout << "buffer["<<i<<"] = " << buffer[i] << "\n";
}*/
return error;
}
vector<float> NN::runGPULauncher(vector<float>& inputs) {
clock_t begin = clock();
//cout << "Time ms: " << ((clock() - begin)/(double)CLOCKS_PER_SEC)*1000 << "\n";
for (int i=inputs.size(); i<nodes.size(); ++i) {
inputs.push_back(0);
}
cudaMemcpy(d_outputs, &inputs[0], inputs.size()*sizeof(float), cudaMemcpyHostToDevice);
//cudaCheckErrors("copy");
//cout << "Time Post Copy: " << ((clock() - begin)/(double)CLOCKS_PER_SEC)*1000 << "\n";
int offset = 0;
int nodeOffset = layers[0].nodes.size();
//cout << "\nnodeCount: " << layers[0].nodes.size() << "\n";
for (int i=1; i<layers.size(); ++i) {
int edgeCount = layers[i].edges.size();
int nodeCount = layers[i].nodes.size();
//cout << "nodeCount: " << nodeCount << "\n";
int gridSize, blockSize;
blockSize = 512;// or 64?
gridSize = edgeCount/blockSize + 1;
runGPU<<<gridSize, blockSize>>>(d_weights, d_edgeNodeMappingTo, d_edgeNodeMappingFrom, d_outputs, d_bias, d_managementThreads, offset, offset+edgeCount);
//runGPUProb<<<gridSize, blockSize>>>(d_weights, d_edgeNodeMappingTo, d_edgeNodeMappingFrom, d_outputs, d_bias, offset, offset+edgeCount, RNGStates);
blockSize = 512;// or 64?
gridSize = nodeCount/blockSize + 1;
runGPUPost<<<gridSize, blockSize>>>(d_outputs, d_bias, nodeOffset, nodeOffset+nodeCount);
//cout << "Time Post Kernel " << i << ": " << ((clock() - begin)/(double)CLOCKS_PER_SEC)*1000 << "\n";
//cudaCheckErrors("kernel");
offset += edgeCount;
nodeOffset += nodeCount;
}
int outputLayerSize = layers[layers.size()-1].nodes.size();
//cout << "\nOutput Size = " << outputLayerSize << "\n";
float* outputs = (float*) malloc( sizeof(float)*outputLayerSize );
//float* weights = (float*) malloc( sizeof(float)*edges.size() );
cudaMemcpy(outputs, d_outputs+(nodes.size()-outputLayerSize), outputLayerSize*sizeof(float), cudaMemcpyDeviceToHost);
//cudaMemcpy(weights, d_weights, edges.size()*sizeof(float), cudaMemcpyDeviceToHost);
for (int i=0; i<edges.size(); ++i) {
// std::cout << "Weight["<<i<<"] = " << weights[i] << "\n";
}
//cout << "Time post copy output: " << ((clock() - begin)/(double)CLOCKS_PER_SEC)*1000 << "\n";
vector<float> result(outputs, outputs + outputLayerSize);
free(outputs);
return result;
}
vector<float> NN::runCpu(vector<float>& inputs) {
vector<float> result;
while (inputs.size() < layers[0].nodes.size()) {
inputs.push_back(0);
}
// Stage input vertices
for (int i=0; i<inputs.size(); ++i) {
layers[0].nodes[i]->output = inputs[i];
result.push_back(inputs[i]);
}
int layerSize = layers.size();
// Forward propagate each layer
for (int i=1; i<layerSize; ++i) {
vector<Vertex*> layerNodes = layers[i].nodes;
for (int j=0; j<layerNodes.size(); ++j) {
Vertex* node = layerNodes[j];
float sum = node->bias;
for (int inputIdx=0; inputIdx < node->inputs.size(); ++inputIdx) {
Edge* inputEdge = node->inputs[inputIdx];
sum += inputEdge->weight * inputEdge->from->output;
}
float outputValue = 1/(1+exp(-1*sum));
node->output = outputValue;
if (i==outputIdx) {
result.push_back(outputValue);
}
}
}
return result;
}
void NN::print(ostream& output) {
// output << "View at: http://www.webgraphviz.com/\n";
output << "digraph G {\n";
stringstream edges;
for (int i=0; i<layers.size(); ++i) {
output << "\tsubgraph cluster_" << i << " {\n"
<< "\t\tstyle=filled;\n"
<< "\t\tcolor=lightgrey;\n"
<< "\t\tnode [style=filled,color=white];\n";
for (int j=0; j<layers[i].nodes.size(); ++j) {
output << "\t\t\"" << layers[i].nodes[j]->index << "\"\n";
for (int k=0; k<layers[i].nodes[j]->outputs.size(); k++) {
edges << "\t\"" << layers[i].nodes[j]->index << "\" -> \"" << layers[i].nodes[j]->outputs[k]->to->index << "\";\n";
}
}
output << "\t\tlabel = \"layer #" << i << "\";\n";
output << "\t}\n";
}
output << edges.str();
output << "}\n";
}
Vertex* NN::addVertex(std::vector<Vertex*> inputs, std::vector<Vertex*> outputs) {
Vertex* vertex = new Vertex;
vertex->index = nodeIdx++;
vertex->bias = 0.0;
vertex->error = 0.0;
vertex->delta = 0.0;
vertex->output = 0.0;
vector<Edge*> edges;
for (int i=0; i<inputs.size(); ++i) {
edges.push_back(addEdge(inputs[i], vertex));
}
for (int i=0; i<outputs.size(); ++i) {
edges.push_back(addEdge(vertex, outputs[i]));
}
nodes.push_back(vertex);
int layerIdx = findLayer(vertex);
if (layerIdx < (int)layers.size()) {
layers[layerIdx].nodes.push_back(vertex);
} else {
Layer layer;
layer.nodes.push_back(vertex);
layers.insert(layers.begin()+layerIdx, layer);
}
vertex->layer = layerIdx;
layers[layerIdx].edges.insert(layers[layerIdx].edges.end(), edges.begin(), edges.end());
return vertex;
}
Vertex* NN::addVertex(int inputCount, int outputCount, int layer, bool useBasicLayering) {
vector<Vertex*> inputs;
vector<Vertex*> outputs;
bool useBasicLayeringSave = useBasicLayering;
useBasicLayering = true;
for (int i=0; i<inputCount; ++i) {
Layer* inputLayer = useBasicLayering ? &layers[layer-1] : &layers[random(0,layer-1)];
int inputVertex = random(0, inputLayer->nodes.size()-1);
inputs.push_back(inputLayer->nodes[inputVertex]);
if (i==0) useBasicLayering=useBasicLayeringSave;
}
for (int i=0; i<outputCount; ++i) {
Layer* outputLayer;
if (useBasicLayering) {
outputLayer = &layers[layer+1];
}else{
int outputLayerId = random(0,layer+1);
if (outputLayerId == layer) {
outputLayerId++;
}
outputLayer = &layers[outputLayerId];
}
int outputVertex = random(0, outputLayer->nodes.size()-1);
outputs.push_back(outputLayer->nodes[outputVertex]);
}
return addVertex(inputs, outputs);
}
Vertex* NN::addVertex(int inputs, int outputs, bool isInput, bool isOutput, int layer, bool useStrictLayers) {
Vertex* vertex = new Vertex;
vertex->index = nodeIdx++;
vertex->isOutput = isOutput;
vertex->bias = 0.0;
vertex->error = 0.0;
vertex->delta = 0.0;
vertex->output = 0.0;
vector<Edge*> edges;
inputs = inputs>0?inputs:inputs+1;
int* edgeNodeMapping = (int*) malloc(sizeof(int)*inputs);
if (isInput) {
inputs--;
}
if (layer > 0) {
Layer* inputLayer = &layers[layer-1];
int inputVertex = random(0, inputLayer->nodes.size()-1);
edges.push_back(addEdge(inputLayer->nodes[inputVertex], vertex));
inputs--;
}
for (int i=0; i<inputs; ++i) {
Layer* inputLayer = useStrictLayers ? &layers[layer-1] : &layers[random(0,layer-1)];
int inputVertex = random(0, inputLayer->nodes.size()-1);
edges.push_back(addEdge(inputLayer->nodes[inputVertex], vertex));
}
for (int i=0; i<outputs; ++i) {
int outputVertex = random(inputSize, nodes.size());
}
nodes.push_back(vertex);
int layerIdx = findLayer(vertex);
if (isOutput && outputIdx != 0) {
isOutput = false;
layerIdx = outputIdx;
}
if (layerIdx < (int)layers.size() && !isOutput) {
layers[layerIdx].nodes.push_back(vertex);
} else {
if (isOutput) {
outputIdx = layers.size();
layerIdx = outputIdx;
}
Layer layer;
layer.nodes.push_back(vertex);
layers.insert(layers.begin()+layerIdx, layer);
}
vertex->layer = layerIdx;
layers[layerIdx].edges.insert(layers[layerIdx].edges.end(), edges.begin(), edges.end());
return vertex;
}
// TODO: remove edge from layer
void NN::removeVertex() {
int vertexIdx = random(0, nodes.size());
Vertex* vertex = nodes[vertexIdx];
for (int i=0; i<vertex->inputs.size(); ++i) {
removeEdge(vertex->inputs[i]);
}
for (int i=0; i<vertex->outputs.size(); ++i) {
removeEdge(vertex->outputs[i]);
}
nodes.erase(nodes.begin() + vertexIdx);
}
void NN::removeEdge(Edge* edge) {
removeFromVector(edge->from->outputs, edge);
removeFromVector(edge->to->inputs, edge);
delete edge;
}
Edge* NN::addEdge(Vertex* from, Vertex* to) {
return addEdge(from, to, false);
}
Edge* NN::addEdge(Vertex* from, Vertex* to, bool addLayer) {
Edge *edge = new Edge;
edge->to = to;
edge->from = from;
edge->weight = 0.1;
edge->change = 0.0;
to->inputs.push_back(edge);
from->outputs.push_back(edge);
edges.push_back(edge);
if (addLayer) {
layers[to->layer].edges.push_back(edge);
}
return edge;
}
template<typename T>
inline void removeFromVector(vector<T> & v, const T & item) {
for(typename vector<T>::iterator iter = v.begin(); iter != v.end(); ++iter) {
if(*iter == item) {
v.erase(iter);
break;
}
}
}
int NN::findLayer(Vertex* vertex) {
int maxDepth = -1;
for (int i=0; i<vertex->inputs.size(); ++i) {
maxDepth = max(maxDepth, vertex->inputs[i]->from->layer);
}
vertex->layer = maxDepth+1;
return maxDepth+1;
}
int random(int min, int max) {
return rand()%(max-min + 1) + min;
}
float average(float average, float dataPoint) {
static int N = 20;
average -= average / N;
average += dataPoint / N;
return average;
}
|
5ff7984dedbf7ac758ed8b639c046136b4ada880.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
/*
* Perform a reduction from data of length 'size' to result, where length of result will be 'number of blocks'.
*/
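// addProduct below is an element-wise fused multiply-add: result[i] = a[i] + b[i] * c[i].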
extern "C"
__global__ void addProduct(int n, float *a, float *b, float *c, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = a[i] + b[i] * c[i];
}
} | 5ff7984dedbf7ac758ed8b639c046136b4ada880.cu | #include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
/*
* Perform a reduction from data of length 'size' to result, where length of result will be 'number of blocks'.
*/
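// addProduct below is an element-wise fused multiply-add: result[i] = a[i] + b[i] * c[i].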
extern "C"
__global__ void addProduct(int n, float *a, float *b, float *c, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = a[i] + b[i] * c[i];
}
} |
f8aa4eacafe4593058f9976ced7a367a5ae7db38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zlascl.cu normal z -> s, Fri Sep 11 18:29:21 2015
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
slascl_full(int m, int n, float mul, float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for (int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
slascl_lower(int m, int n, float mul, float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
A += ind;
if (ind < m) {
for (int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
slascl_upper(int m, int n, float mul, float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for (int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/**
Purpose
-------
SLASCL multiplies the M by N real matrix A by the real scalar
CTO/CFROM. This is done without over/underflow as long as the final
result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indicates the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
kl INTEGER
Unused, for LAPACK compatibility.
@param[in]
ku KU is INTEGER
Unused, for LAPACK compatibility.
@param[in]
cfrom REAL
@param[in]
cto REAL
\n
The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
without over/underflow if the final result CTO*A(I,J)/CFROM
can be represented without over/underflow.
CFROM must be nonzero. CFROM and CTO must not be NAN.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix to be multiplied by CTO/CFROM. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
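// Usage sketch (illustrative only, not part of the original file): halve every
// entry of a full m-by-n device matrix dA, assuming m, n, dA, ldda, and queue
// are already set up by the caller.
//
//     magma_int_t info = 0;
//     magmablas_slascl_q( MagmaFull, /*kl=*/0, /*ku=*/0,
//                         /*cfrom=*/2.0f, /*cto=*/1.0f,
//                         m, n, dA, ldda, queue, &info );
//
// A negative info flags the offending argument; kl and ku are ignored here.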
extern "C" void
magmablas_slascl_q(
magma_type_t type, magma_int_t kl, magma_int_t ku,
float cfrom, float cto,
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( cfrom == 0 || isnan(cfrom) )
*info = -4;
else if ( isnan(cto) )
*info = -5;
else if ( m < 0 )
*info = -6;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -7;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( magma_ceildiv( m, NB ) );
dim3 threads( NB );
float smlnum, bignum, cfromc, ctoc, cto1, cfrom1, mul;
magma_int_t done = false;
// Uses over/underflow procedure from LAPACK slascl
// Get machine parameters
smlnum = lapackf77_slamch("s");
bignum = 1 / smlnum;
cfromc = cfrom;
ctoc = cto;
int cnt = 0;
while( ! done ) {
cfrom1 = cfromc*smlnum;
if ( cfrom1 == cfromc ) {
// cfromc is an inf. Multiply by a correctly signed zero for
// finite ctoc, or a nan if ctoc is infinite.
mul = ctoc / cfromc;
done = true;
cto1 = ctoc;
}
else {
cto1 = ctoc / bignum;
if ( cto1 == ctoc ) {
// ctoc is either 0 or an inf. In both cases, ctoc itself
// serves as the correct multiplication factor.
mul = ctoc;
done = true;
cfromc = 1;
}
else if ( fabs(cfrom1) > fabs(ctoc) && ctoc != 0 ) {
mul = smlnum;
done = false;
cfromc = cfrom1;
}
else if ( fabs(cto1) > fabs(cfromc) ) {
mul = bignum;
done = false;
ctoc = cto1;
}
else {
mul = ctoc / cfromc;
done = true;
}
}
if (type == MagmaLower) {
hipLaunchKernelGGL(( slascl_lower) , dim3(grid), dim3(threads), 0, queue , m, n, mul, dA, ldda);
}
else if (type == MagmaUpper) {
hipLaunchKernelGGL(( slascl_upper) , dim3(grid), dim3(threads), 0, queue , m, n, mul, dA, ldda);
}
else if (type == MagmaFull) {
hipLaunchKernelGGL(( slascl_full) , dim3(grid), dim3(threads), 0, queue , m, n, mul, dA, ldda);
}
cnt += 1;
}
}
/**
@see magmablas_slascl_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slascl(
magma_type_t type, magma_int_t kl, magma_int_t ku,
float cfrom, float cto,
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magma_int_t *info )
{
magmablas_slascl_q( type, kl, ku, cfrom, cto, m, n, dA, ldda, magma_stream, info );
}
| f8aa4eacafe4593058f9976ced7a367a5ae7db38.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zlascl.cu normal z -> s, Fri Sep 11 18:29:21 2015
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
slascl_full(int m, int n, float mul, float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for (int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
slascl_lower(int m, int n, float mul, float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
A += ind;
if (ind < m) {
for (int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
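// Illustrative note: break_d clamps the scan to the diagonal, so for row
// ind = 3 of a 6 x 6 lower-triangular A the loop above touches columns 0..3
// only; the strictly upper part of that row is left unmodified.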
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
slascl_upper(int m, int n, float mul, float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for (int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/**
Purpose
-------
SLASCL multiplies the M by N real matrix A by the real scalar
CTO/CFROM. This is done without over/underflow as long as the final
result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies whether
A is full, upper triangular, or lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indicates the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other storage formats that LAPACK supports are not currently supported by MAGMA.
@param[in]
kl INTEGER
Unused, for LAPACK compatibility.
@param[in]
ku INTEGER
Unused, for LAPACK compatibility.
@param[in]
cfrom REAL
@param[in]
cto REAL
\n
The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
without over/underflow if the final result CTO*A(I,J)/CFROM
can be represented without over/underflow.
CFROM must be nonzero. CFROM and CTO must not be NAN.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix to be multiplied by CTO/CFROM. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slascl_q(
magma_type_t type, magma_int_t kl, magma_int_t ku,
float cfrom, float cto,
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( cfrom == 0 || isnan(cfrom) )
*info = -4;
else if ( isnan(cto) )
*info = -5;
else if ( m < 0 )
*info = -6;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -7;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( magma_ceildiv( m, NB ) );
dim3 threads( NB );
float smlnum, bignum, cfromc, ctoc, cto1, cfrom1, mul;
magma_int_t done = false;
// Uses over/underflow procedure from LAPACK slascl
// Get machine parameters
smlnum = lapackf77_slamch("s");
bignum = 1 / smlnum;
cfromc = cfrom;
ctoc = cto;
int cnt = 0;
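    // Illustrative note: for well-scaled arguments this loop runs once with
    // mul = ctoc/cfromc.  Only when that single float quotient would
    // over/underflow (e.g. cfrom ~ 1e38f and cto ~ 1e-38f, whose quotient
    // ~1e-76 underflows) does it take several passes, scaling A by smlnum or
    // bignum each pass and launching the kernel once per pass.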
while( ! done ) {
cfrom1 = cfromc*smlnum;
if ( cfrom1 == cfromc ) {
// cfromc is an inf. Multiply by a correctly signed zero for
// finite ctoc, or a nan if ctoc is infinite.
mul = ctoc / cfromc;
done = true;
cto1 = ctoc;
}
else {
cto1 = ctoc / bignum;
if ( cto1 == ctoc ) {
// ctoc is either 0 or an inf. In both cases, ctoc itself
// serves as the correct multiplication factor.
mul = ctoc;
done = true;
cfromc = 1;
}
else if ( fabs(cfrom1) > fabs(ctoc) && ctoc != 0 ) {
mul = smlnum;
done = false;
cfromc = cfrom1;
}
else if ( fabs(cto1) > fabs(cfromc) ) {
mul = bignum;
done = false;
ctoc = cto1;
}
else {
mul = ctoc / cfromc;
done = true;
}
}
if (type == MagmaLower) {
slascl_lower <<< grid, threads, 0, queue >>> (m, n, mul, dA, ldda);
}
else if (type == MagmaUpper) {
slascl_upper <<< grid, threads, 0, queue >>> (m, n, mul, dA, ldda);
}
else if (type == MagmaFull) {
slascl_full <<< grid, threads, 0, queue >>> (m, n, mul, dA, ldda);
}
cnt += 1;
}
}
/**
@see magmablas_slascl_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slascl(
magma_type_t type, magma_int_t kl, magma_int_t ku,
float cfrom, float cto,
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magma_int_t *info )
{
magmablas_slascl_q( type, kl, ku, cfrom, cto, m, n, dA, ldda, magma_stream, info );
}
|
248a9d2a0f43045401ac09c092569655d81c3b0e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* yolo_layer.cu
*
* This code was originally written by wang-xinyu under MIT license.
* I took it from:
*
* https://github.com/wang-xinyu/tensorrtx/tree/master/yolov4
*
* and made necessary modifications.
*
* - JK Jung
*/
#include "yolo_layer.h"
using namespace Yolo;
namespace
{
// Write values into buffer
template <typename T>
void write(char*& buffer, const T& val)
{
*reinterpret_cast<T*>(buffer) = val;
buffer += sizeof(T);
}
// Read values from buffer
template <typename T>
void read(const char*& buffer, T& val)
{
val = *reinterpret_cast<const T*>(buffer);
buffer += sizeof(T);
}
} // namespace
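// Illustrative note: write<T>() and read<T>() implement a flat byte-buffer
// protocol -- serialize() packs the plugin parameters in a fixed order and the
// deserializing constructor unpacks them in exactly the same order.  A
// hypothetical round-trip (buffer and variables here are made up):
//
//     char buf[64]; char* p = buf;
//     write(p, yolo_width); write(p, yolo_height);   // pack
//     const char* q = buf; int w, h;
//     read(q, w); read(q, h);                        // unpack in the same order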
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin(int yolo_width, int yolo_height, int num_anchors, float* anchors, int num_classes, int input_width, int input_height, float scale_x_y)
{
mYoloWidth = yolo_width;
mYoloHeight = yolo_height;
mNumAnchors = num_anchors;
memcpy(mAnchorsHost, anchors, num_anchors * 2 * sizeof(float));
mNumClasses = num_classes;
mInputWidth = input_width;
mInputHeight = input_height;
mScaleXY = scale_x_y;
CHECK(hipMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float)));
CHECK(hipMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), hipMemcpyHostToDevice));
}
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mThreadCount);
read(d, mYoloWidth);
read(d, mYoloHeight);
read(d, mNumAnchors);
memcpy(mAnchorsHost, d, MAX_ANCHORS * 2 * sizeof(float));
d += MAX_ANCHORS * 2 * sizeof(float);
read(d, mNumClasses);
read(d, mInputWidth);
read(d, mInputHeight);
read(d, mScaleXY);
CHECK(hipMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float)));
CHECK(hipMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), hipMemcpyHostToDevice));
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
char* d = static_cast<char*>(buffer), *a = d;
write(d, mThreadCount);
write(d, mYoloWidth);
write(d, mYoloHeight);
write(d, mNumAnchors);
memcpy(d, mAnchorsHost, MAX_ANCHORS * 2 * sizeof(float));
d += MAX_ANCHORS * 2 * sizeof(float);
write(d, mNumClasses);
write(d, mInputWidth);
write(d, mInputHeight);
write(d, mScaleXY);
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mThreadCount) + \
sizeof(mYoloWidth) + sizeof(mYoloHeight) + \
sizeof(mNumAnchors) + MAX_ANCHORS * 2 * sizeof(float) + \
sizeof(mNumClasses) + \
sizeof(mInputWidth) + sizeof(mInputHeight) + \
sizeof(mScaleXY);
}
int YoloLayerPlugin::initialize()
{
return 0;
}
void YoloLayerPlugin::terminate()
{
CHECK(hipFree(mAnchors));
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
assert(index == 0);
assert(nbInputDims == 1);
assert(inputs[0].d[0] == (mNumClasses + 5) * mNumAnchors);
assert(inputs[0].d[1] == mYoloHeight);
assert(inputs[0].d[2] == mYoloWidth);
// output detection results to the channel dimension
int totalsize = mYoloWidth * mYoloHeight * mNumAnchors * sizeof(Detection) / sizeof(float);
return Dims3(totalsize, 1, 1);
}
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext()
{
}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin *p = new YoloLayerPlugin(mYoloWidth, mYoloHeight, mNumAnchors, (float*) mAnchorsHost, mNumClasses, mInputWidth, mInputHeight, mScaleXY);
p->setPluginNamespace(mPluginNamespace);
return p;
}
inline __device__ float sigmoidGPU(float x) { return 1.0f / (1.0f + __expf(-x)); }
inline __device__ float scale_sigmoidGPU(float x, float scale)
{
return scale * sigmoidGPU(x) - (scale - 1.0f) * 0.5f;
}
// CalDetection(): This kernel processes 1 yolo layer calculation. It
// distributes calculations so that 1 GPU thread would be responsible
// for each grid/anchor combination.
// NOTE: The output (x, y, w, h) are between 0.0 and 1.0
// (relative to the original image width and height).
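// Illustrative example: for a 19 x 19 grid with 3 anchors the kernel covers
// 19*19*3 = 1083 (cell, anchor) pairs; thread idx = 400 maps to
// anchor_idx = 400 / 361 = 1 and cell 400 - 361 = 39, i.e. row 2, col 1.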
__global__ void CalDetection(const float *input, float *output, int yolo_width, int yolo_height, int num_anchors,
const float *anchors, int num_classes, int input_w, int input_h, float scale_x_y)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
Detection* det = ((Detection*) output) + idx;
int total_grids = yolo_width * yolo_height;
if (idx >= total_grids * num_anchors) return;
int anchor_idx = idx / total_grids;
idx = idx - total_grids * anchor_idx;
int info_len = 5 + num_classes;
const float* cur_input = input + anchor_idx * (info_len * total_grids);
int class_id;
float max_cls_logit = -CUDART_INF_F; // minus infinity
for (int i = 5; i < info_len; ++i) {
float l = cur_input[idx + i * total_grids];
if (l > max_cls_logit) {
max_cls_logit = l;
class_id = i - 5;
}
}
float max_cls_prob = sigmoidGPU(max_cls_logit);
float box_prob = sigmoidGPU(cur_input[idx + 4 * total_grids]);
//if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH)
// return;
int row = idx / yolo_width;
int col = idx % yolo_width;
det->bbox[0] = (col + scale_sigmoidGPU(cur_input[idx + 0 * total_grids], scale_x_y)) / yolo_width; // [0, 1]
det->bbox[1] = (row + scale_sigmoidGPU(cur_input[idx + 1 * total_grids], scale_x_y)) / yolo_height; // [0, 1]
det->bbox[2] = __expf(cur_input[idx + 2 * total_grids]) * anchors[2 * anchor_idx] / input_w; // [0, 1]
det->bbox[3] = __expf(cur_input[idx + 3 * total_grids]) * anchors[2 * anchor_idx + 1] / input_h; // [0, 1]
det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left
det->bbox[1] -= det->bbox[3] / 2;
det->det_confidence = box_prob;
det->class_id = class_id;
det->class_confidence = max_cls_prob;
}
void YoloLayerPlugin::forwardGpu(const float* const* inputs, float* output, hipStream_t stream, int batchSize)
{
int num_elements = batchSize * mNumAnchors * mYoloWidth * mYoloHeight;
//CHECK(hipMemset(output, 0, num_elements * sizeof(Detection)));
hipLaunchKernelGGL(( CalDetection), dim3((num_elements + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, stream,
inputs[0], output, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY);
}
int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, hipStream_t stream)
{
forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
assert(!strcmp(name, getPluginName()));
const PluginField* fields = fc->fields;
int yolo_width, yolo_height, num_anchors = 0;
float anchors[MAX_ANCHORS * 2];
int num_classes;
int input_width, input_height;
float scale_x_y = 1.0;
for (int i = 0; i < fc->nbFields; ++i)
{
const char* attrName = fields[i].name;
if (!strcmp(attrName, "yoloWidth"))
{
assert(fields[i].type == PluginFieldType::kINT32);
yolo_width = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "yoloHeight"))
{
assert(fields[i].type == PluginFieldType::kINT32);
yolo_height = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "numAnchors"))
{
assert(fields[i].type == PluginFieldType::kINT32);
num_anchors = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "numClasses"))
{
assert(fields[i].type == PluginFieldType::kINT32);
num_classes = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "inputWidth"))
{
assert(fields[i].type == PluginFieldType::kINT32);
input_width = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "inputHeight"))
{
assert(fields[i].type == PluginFieldType::kINT32);
input_height = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "anchors")){
assert(num_anchors > 0 && num_anchors <= MAX_ANCHORS);
assert(fields[i].type == PluginFieldType::kFLOAT32);
memcpy(anchors, static_cast<const float*>(fields[i].data), num_anchors * 2 * sizeof(float));
}
else if (!strcmp(attrName, "scaleXY"))
{
assert(fields[i].type == PluginFieldType::kFLOAT32);
scale_x_y = *(static_cast<const float*>(fields[i].data));
}
}
assert(yolo_width > 0 && yolo_height > 0);
assert(anchors[0] > 0.0f && anchors[1] > 0.0f);
assert(num_classes > 0);
assert(input_width > 0 && input_height > 0);
assert(scale_x_y >= 1.0);
YoloLayerPlugin* obj = new YoloLayerPlugin(yolo_width, yolo_height, num_anchors, anchors, num_classes, input_width, input_height, scale_x_y);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
REGISTER_TENSORRT_PLUGIN(YoloPluginCreator);
} // namespace nvinfer1
| 248a9d2a0f43045401ac09c092569655d81c3b0e.cu | /*
* yolo_layer.cu
*
* This code was originally written by wang-xinyu under MIT license.
* I took it from:
*
* https://github.com/wang-xinyu/tensorrtx/tree/master/yolov4
*
* and made necessary modifications.
*
* - JK Jung
*/
#include "yolo_layer.h"
using namespace Yolo;
namespace
{
// Write values into buffer
template <typename T>
void write(char*& buffer, const T& val)
{
*reinterpret_cast<T*>(buffer) = val;
buffer += sizeof(T);
}
// Read values from buffer
template <typename T>
void read(const char*& buffer, T& val)
{
val = *reinterpret_cast<const T*>(buffer);
buffer += sizeof(T);
}
} // namespace
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin(int yolo_width, int yolo_height, int num_anchors, float* anchors, int num_classes, int input_width, int input_height, float scale_x_y)
{
mYoloWidth = yolo_width;
mYoloHeight = yolo_height;
mNumAnchors = num_anchors;
memcpy(mAnchorsHost, anchors, num_anchors * 2 * sizeof(float));
mNumClasses = num_classes;
mInputWidth = input_width;
mInputHeight = input_height;
mScaleXY = scale_x_y;
CHECK(cudaMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float)));
CHECK(cudaMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), cudaMemcpyHostToDevice));
}
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mThreadCount);
read(d, mYoloWidth);
read(d, mYoloHeight);
read(d, mNumAnchors);
memcpy(mAnchorsHost, d, MAX_ANCHORS * 2 * sizeof(float));
d += MAX_ANCHORS * 2 * sizeof(float);
read(d, mNumClasses);
read(d, mInputWidth);
read(d, mInputHeight);
read(d, mScaleXY);
CHECK(cudaMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float)));
CHECK(cudaMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), cudaMemcpyHostToDevice));
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
char* d = static_cast<char*>(buffer), *a = d;
write(d, mThreadCount);
write(d, mYoloWidth);
write(d, mYoloHeight);
write(d, mNumAnchors);
memcpy(d, mAnchorsHost, MAX_ANCHORS * 2 * sizeof(float));
d += MAX_ANCHORS * 2 * sizeof(float);
write(d, mNumClasses);
write(d, mInputWidth);
write(d, mInputHeight);
write(d, mScaleXY);
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mThreadCount) + \
sizeof(mYoloWidth) + sizeof(mYoloHeight) + \
sizeof(mNumAnchors) + MAX_ANCHORS * 2 * sizeof(float) + \
sizeof(mNumClasses) + \
sizeof(mInputWidth) + sizeof(mInputHeight) + \
sizeof(mScaleXY);
}
int YoloLayerPlugin::initialize()
{
return 0;
}
void YoloLayerPlugin::terminate()
{
CHECK(cudaFree(mAnchors));
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
assert(index == 0);
assert(nbInputDims == 1);
assert(inputs[0].d[0] == (mNumClasses + 5) * mNumAnchors);
assert(inputs[0].d[1] == mYoloHeight);
assert(inputs[0].d[2] == mYoloWidth);
// output detection results to the channel dimension
int totalsize = mYoloWidth * mYoloHeight * mNumAnchors * sizeof(Detection) / sizeof(float);
return Dims3(totalsize, 1, 1);
}
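// Illustrative note (assumes Detection packs 7 floats: 4 bbox values plus
// det_confidence, class_id and class_confidence): a 19 x 19 grid with 3
// anchors then yields totalsize = 19*19*3*7 = 7581 output floats per batch
// item.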
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext()
{
}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin *p = new YoloLayerPlugin(mYoloWidth, mYoloHeight, mNumAnchors, (float*) mAnchorsHost, mNumClasses, mInputWidth, mInputHeight, mScaleXY);
p->setPluginNamespace(mPluginNamespace);
return p;
}
inline __device__ float sigmoidGPU(float x) { return 1.0f / (1.0f + __expf(-x)); }
inline __device__ float scale_sigmoidGPU(float x, float scale)
{
return scale * sigmoidGPU(x) - (scale - 1.0f) * 0.5f;
}
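// Illustrative note: with scale = 1.05f, scale_sigmoidGPU maps the (0, 1)
// sigmoid range to roughly (-0.025, 1.025), letting predicted box centers
// reach the borders of their grid cell; scale = 1.0f reduces to the plain
// sigmoid.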
// CalDetection(): This kernel processes 1 yolo layer calculation. It
// distributes calculations so that 1 GPU thread would be responsible
// for each grid/anchor combination.
// NOTE: The output (x, y, w, h) are between 0.0 and 1.0
// (relative to the original image width and height).
__global__ void CalDetection(const float *input, float *output, int yolo_width, int yolo_height, int num_anchors,
const float *anchors, int num_classes, int input_w, int input_h, float scale_x_y)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
Detection* det = ((Detection*) output) + idx;
int total_grids = yolo_width * yolo_height;
if (idx >= total_grids * num_anchors) return;
int anchor_idx = idx / total_grids;
idx = idx - total_grids * anchor_idx;
int info_len = 5 + num_classes;
const float* cur_input = input + anchor_idx * (info_len * total_grids);
int class_id;
float max_cls_logit = -CUDART_INF_F; // minus infinity
for (int i = 5; i < info_len; ++i) {
float l = cur_input[idx + i * total_grids];
if (l > max_cls_logit) {
max_cls_logit = l;
class_id = i - 5;
}
}
float max_cls_prob = sigmoidGPU(max_cls_logit);
float box_prob = sigmoidGPU(cur_input[idx + 4 * total_grids]);
//if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH)
// return;
int row = idx / yolo_width;
int col = idx % yolo_width;
det->bbox[0] = (col + scale_sigmoidGPU(cur_input[idx + 0 * total_grids], scale_x_y)) / yolo_width; // [0, 1]
det->bbox[1] = (row + scale_sigmoidGPU(cur_input[idx + 1 * total_grids], scale_x_y)) / yolo_height; // [0, 1]
det->bbox[2] = __expf(cur_input[idx + 2 * total_grids]) * anchors[2 * anchor_idx] / input_w; // [0, 1]
det->bbox[3] = __expf(cur_input[idx + 3 * total_grids]) * anchors[2 * anchor_idx + 1] / input_h; // [0, 1]
det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left
det->bbox[1] -= det->bbox[3] / 2;
det->det_confidence = box_prob;
det->class_id = class_id;
det->class_confidence = max_cls_prob;
}
void YoloLayerPlugin::forwardGpu(const float* const* inputs, float* output, cudaStream_t stream, int batchSize)
{
int num_elements = batchSize * mNumAnchors * mYoloWidth * mYoloHeight;
//CHECK(cudaMemset(output, 0, num_elements * sizeof(Detection)));
CalDetection<<<(num_elements + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream>>>
(inputs[0], output, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY);
}
int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream)
{
forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
assert(!strcmp(name, getPluginName()));
const PluginField* fields = fc->fields;
int yolo_width, yolo_height, num_anchors = 0;
float anchors[MAX_ANCHORS * 2];
int num_classes;
int input_width, input_height;
float scale_x_y = 1.0;
for (int i = 0; i < fc->nbFields; ++i)
{
const char* attrName = fields[i].name;
if (!strcmp(attrName, "yoloWidth"))
{
assert(fields[i].type == PluginFieldType::kINT32);
yolo_width = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "yoloHeight"))
{
assert(fields[i].type == PluginFieldType::kINT32);
yolo_height = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "numAnchors"))
{
assert(fields[i].type == PluginFieldType::kINT32);
num_anchors = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "numClasses"))
{
assert(fields[i].type == PluginFieldType::kINT32);
num_classes = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "inputWidth"))
{
assert(fields[i].type == PluginFieldType::kINT32);
input_width = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "inputHeight"))
{
assert(fields[i].type == PluginFieldType::kINT32);
input_height = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "anchors")){
assert(num_anchors > 0 && num_anchors <= MAX_ANCHORS);
assert(fields[i].type == PluginFieldType::kFLOAT32);
memcpy(anchors, static_cast<const float*>(fields[i].data), num_anchors * 2 * sizeof(float));
}
else if (!strcmp(attrName, "scaleXY"))
{
assert(fields[i].type == PluginFieldType::kFLOAT32);
scale_x_y = *(static_cast<const float*>(fields[i].data));
}
}
assert(yolo_width > 0 && yolo_height > 0);
assert(anchors[0] > 0.0f && anchors[1] > 0.0f);
assert(num_classes > 0);
assert(input_width > 0 && input_height > 0);
assert(scale_x_y >= 1.0);
YoloLayerPlugin* obj = new YoloLayerPlugin(yolo_width, yolo_height, num_anchors, anchors, num_classes, input_width, input_height, scale_x_y);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
REGISTER_TENSORRT_PLUGIN(YoloPluginCreator);
} // namespace nvinfer1
|
45462379dd9bec34d89e72e745c617ef365cb937.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/Atomic.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/DeviceUtils.cuh>
#include <ATen/TensorUtils.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/arange.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/zeros.h>
#include <ATen/ops/_embedding_bag_native.h>
#include <ATen/ops/_embedding_bag_forward_only_native.h>
#include <ATen/ops/_embedding_bag_dense_backward_native.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_native.h>
#endif
#include <ATen/hip/cub.cuh>
#include <ATen/native/hip/SortingCommon.cuh>
#include <ATen/native/hip/EmbeddingBackwardKernel.cuh>
#include <ATen/native/hip/KernelUtils.cuh>
#include <c10/macros/Macros.h>
#if CUB_SUPPORTS_SCAN_BY_KEY()
#include <thrust/iterator/reverse_iterator.h>
#endif
namespace at {
namespace native {
#if !CUB_SUPPORTS_SCAN_BY_KEY()
template<typename index_t>
void embedding_dense_backward_cuda_scan(Tensor &sorted_indices, Tensor &count);
#endif
namespace {
constexpr int MODE_SUM = 0;
constexpr int MODE_MEAN = 1;
constexpr int MODE_MAX = 2;
std::pair<Tensor, Tensor> promoteIndicesAndOffsets(
const Tensor& indices,
const Tensor& offsets) {
const auto commonType =
promoteTypes(offsets.scalar_type(), indices.scalar_type());
return {
indices.scalar_type() == commonType ? indices
: indices.toType(commonType),
offsets.scalar_type() == commonType ? offsets
: offsets.toType(commonType)};
}
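// Illustrative note: promoteTypes picks the common dtype, so e.g. int32
// indices paired with int64 offsets come back as two int64 tensors; tensors
// already of the common dtype are returned unchanged.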
// This kernel assumes that all input tensors except `weight` and
// per_sample_weights are contiguous.
template <typename scalar_t, typename index_t>
__global__ void EmbeddingBag_updateOutputKernel_max(
index_t *input, index_t *offsets, scalar_t *weight, scalar_t *output,
index_t *offset2bag, int64_t numIndices, int64_t numBags,
int64_t featureSize, int64_t weight_stride0, int64_t weight_stride1,
index_t *bag_size, index_t *max_indices,
index_t padding_idx) {
// the strategy here is that each bag x feature is handled by a single thread
int64_t chunksPerBag = ceil_div(featureSize, (int64_t)blockDim.x);
int64_t numChunks = numBags * chunksPerBag;
int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y;
int64_t chunkStride = gridDim.x * blockDim.y;
for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) {
int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x;
if (featureDim < featureSize) {
int64_t bag = chunk / chunksPerBag;
scalar_t *weightFeat = weight + featureDim * weight_stride1;
int64_t begin = bag == 0 ? 0 : offsets[bag]; // forces first offset to be 0 instead of asserting on it
int64_t end = (bag < numBags - 1) ? (offsets[bag + 1]) : numIndices;
CUDA_KERNEL_ASSERT(end >= begin);
scalar_t weightFeatMax = 0;
int64_t bag_size_ = 0;
int64_t maxWord = -1;
for (int64_t emb = begin; emb < end; emb++) {
bool pad = (input[emb] == padding_idx);
const int64_t weightRow = input[emb] * weight_stride0;
scalar_t weightValue = weightFeat[weightRow];
if (bag_size_ == 0 || weightValue > weightFeatMax) {
weightFeatMax = pad ? weightFeatMax : weightValue;
maxWord = pad ? maxWord : input[emb];
}
bag_size_ += pad ? 0 : 1;
if (featureDim == 0) {
offset2bag[emb] = bag;
}
}
bag_size[bag] = bag_size_;
max_indices[bag * featureSize + featureDim] = maxWord;
output[bag * featureSize + featureDim] = weightFeatMax;
}
}
}
// This kernel assumes that all input tensors except `weight` and
// per_sample_weights are contiguous.
template <typename scalar_t, typename index_t>
__global__ void EmbeddingBag_updateOutputKernel_sum_mean(
index_t *input, index_t *offsets, scalar_t *weight, scalar_t *output,
index_t *offset2bag, int64_t numIndices, int64_t numBags,
int64_t featureSize, int64_t weight_stride0, int64_t weight_stride1,
int mode, index_t *bag_size,
scalar_t* per_sample_weights, int64_t per_sample_weights_stride,
index_t padding_idx) {
// the strategy here is that each bag x feature is handled by a single thread
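  // Illustrative example: with featureSize = 128 and blockDim.x = 32,
  // chunksPerBag = 4, so chunk c serves bag c / 4 and feature column
  // (c % 4) * 32 + threadIdx.x; consecutive threads touch consecutive
  // feature columns.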
using accscalar_t = acc_type<scalar_t, true>;
int64_t chunksPerBag = ceil_div(featureSize, (int64_t)blockDim.x);
int64_t numChunks = numBags * chunksPerBag;
int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y;
int64_t chunkStride = gridDim.x * blockDim.y;
for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) {
int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x;
if (featureDim < featureSize) {
int64_t bag = chunk / chunksPerBag;
scalar_t *weightFeat = weight + featureDim * weight_stride1;
int64_t begin = bag == 0 ? 0 : offsets[bag]; // forces first offset to be 0 instead of asserting on it
int64_t end = (bag < numBags - 1) ? (offsets[bag + 1]) : numIndices;
CUDA_KERNEL_ASSERT(end >= begin);
accscalar_t weightFeatSum = 0;
int64_t bag_size_ = 0;
for (int64_t emb = begin; emb < end; emb++) {
bool pad = (input[emb] == padding_idx);
const int64_t weightRow = input[emb] * weight_stride0;
scalar_t weightValue = weightFeat[weightRow];
weightValue = pad ? static_cast<scalar_t>(0) : weightValue;
if (per_sample_weights) {
accscalar_t scaleWeightBy = static_cast<accscalar_t>(
per_sample_weights[emb * per_sample_weights_stride]);
weightFeatSum += scaleWeightBy * static_cast<accscalar_t>(weightValue);
} else {
weightFeatSum += static_cast<accscalar_t>(weightValue);
}
bag_size_ += pad ? 0 : 1;
if (featureDim == 0) {
offset2bag[emb] = bag;
}
}
if (mode == MODE_MEAN) {
if (bag_size_ != 0) {
weightFeatSum = weightFeatSum / static_cast<accscalar_t>(bag_size_);
}
}
bag_size[bag] = bag_size_;
output[bag * featureSize + featureDim] = static_cast<scalar_t>(weightFeatSum);
}
}
}
Tensor embedding_bag_backward_cuda_sum_avg(
const Tensor &grad,
const Tensor &indices_,
const Tensor &offset2bag,
const Tensor &bag_size,
int64_t num_weights,
bool scale_grad_by_freq, int64_t mode,
const Tensor& per_sample_weights,
int64_t padding_idx) {
auto indices = indices_.contiguous();
ptrdiff_t num_indices = indices.numel();
if (num_indices == 0) {
// all empty bags
return at::zeros({num_weights, grad.size(1)}, grad.options());
}
auto sorted_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor count;
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_bag_backward_cuda_sum_avg", [&] () {
auto range = at::arange(num_indices, indices.options());
// int64_t nbits = cuda::cub::get_num_bits(num_weights);
cuda::cub::radix_sort_pairs(
indices.data_ptr<index_t>(), sorted_indices.data_ptr<index_t>(),
range.data_ptr<index_t>(), orig_indices.data_ptr<index_t>(),
num_indices, false/*, 0, nbits*/);
});
if (scale_grad_by_freq) {
count = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
#if CUB_SUPPORTS_SCAN_BY_KEY()
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_bag_backward_cuda_sum_avg", [&] () {
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = sorted_indices.data_ptr<index_t>();
auto count_data = count.data_ptr<index_t>();
cuda::cub::inclusive_sum_by_key(
sorted_data,
at_cuda_detail::cub::ConstantInputIterator<index_t>(1),
count_data,
num_indices
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
cuda::cub::inclusive_scan_by_key(
thrust::make_reverse_iterator(sorted_data + num_indices),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::make_reverse_iterator(count_data + num_indices),
at_cuda_detail::hipcub::Max(),
num_indices
);
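      // Illustrative note (describes intended use, not code in this file):
      // after the reverse max-scan, count[i] holds the total number of
      // occurrences of sorted_indices[i]; with scale_grad_by_freq the backward
      // kernel uses it to divide each index's accumulated gradient by its
      // frequency.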
});
#else
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_bag_backward_cuda_sum_avg", [&] () {
embedding_dense_backward_cuda_scan<index_t>(sorted_indices, count);
});
#endif
}
return embedding_backward_cuda_kernel(grad, orig_indices, sorted_indices,
count, num_weights, padding_idx, mode == MODE_MEAN, offset2bag,
bag_size, per_sample_weights);
}
template <typename scalar_t, typename index_t>
__global__ void EmbeddingBag_accGradParametersKernel_max(
index_t *max_indices, scalar_t *gradOutput,
scalar_t *gradWeight, int64_t stride, int64_t numBags,
index_t padding_idx, const index_t numel) {
using accscalar_t = acc_type<scalar_t, true>;
int64_t chunksPerBag = ceil_div(stride, (int64_t)blockDim.x);
int64_t numChunks = numBags * chunksPerBag;
int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y;
int64_t chunkStride = gridDim.x * blockDim.y;
for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) {
int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x;
if (featureDim < stride) {
int64_t bag = chunk / chunksPerBag;
index_t word_idx = max_indices[bag * stride + featureDim];
if (word_idx >= 0 && word_idx != padding_idx) {
// If bag is empty, we have max_indices[idx] set to -1 in forward.
fastAtomicAdd(
gradWeight, static_cast<index_t>(word_idx * stride + featureDim),
numel, gradOutput[bag * stride + featureDim], true);
}
}
}
}
Tensor embedding_bag_backward_cuda_max(const Tensor &grad,
const Tensor &max_indices,
int64_t num_weights,
int64_t padding_idx) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("embedding_bag_backward_cuda_max");
auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.options());
int64_t stride = grad_weight.stride(0);
int64_t numBags = grad.size(0);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
#if defined(USE_ROCM)
dim3 block = dim3(64, 4);
#else
dim3 block = dim3(32, 8);
#endif
int grid = 1024;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "embedding_bag_backward_cuda_max", [&] {
AT_DISPATCH_INDEX_TYPES(max_indices.scalar_type(), "embedding_bag_backward_cuda_max", [&] () {
hipLaunchKernelGGL(( EmbeddingBag_accGradParametersKernel_max<
scalar_t, index_t>), dim3(grid), dim3(block), 0, stream,
max_indices.data_ptr<index_t>(), grad.data_ptr<scalar_t>(),
grad_weight.data_ptr<scalar_t>(), stride, numBags,
padding_idx, grad_weight.numel());
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
return grad_weight;
}
}
// Assumes all input tensors are contiguous.
// See NOTE [ embedding_bag Native Functions ] in native_functions.yaml for details
std::tuple<Tensor, Tensor, Tensor, Tensor>
_embedding_bag_forward_only_cuda(const Tensor &weight, const Tensor &indices,
const Tensor &offsets, const bool scale_grad_by_freq,
const int64_t mode, bool sparse, const c10::optional<Tensor>& per_sample_weights_opt,
bool include_last_offset, int64_t padding_idx) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt);
const Tensor& per_sample_weights = *per_sample_weights_maybe_owned;
return _embedding_bag_cuda(
weight,
indices,
offsets,
scale_grad_by_freq,
mode,
sparse,
per_sample_weights,
include_last_offset,
padding_idx);
}
// Assumes all input tensors are contiguous.
// See NOTE [ embedding_bag Native Functions ] in native_functions.yaml for details
std::tuple<Tensor, Tensor, Tensor, Tensor>
_embedding_bag_cuda(const Tensor &weight, const Tensor &indices_,
const Tensor &offsets_, const bool scale_grad_by_freq,
const int64_t mode, bool sparse, const c10::optional<Tensor>& per_sample_weights_opt,
bool include_last_offset, int64_t padding_idx) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt);
const Tensor& per_sample_weights = *per_sample_weights_maybe_owned;
Tensor indices, offsets;
std::tie(indices, offsets) = promoteIndicesAndOffsets(indices_, offsets_);
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarTypes("embedding_bag_cuda", indices_arg, {kLong, kInt});
auto offsets_arg = TensorArg(offsets, "offsets", 1);
checkScalarTypes("embedding_bag_cuda", offsets_arg, {kLong, kInt});
checkSameType("embedding_bag_cuda", indices_arg, offsets_arg);
auto weight_arg = TensorArg(weight, "weight", 1);
checkSameGPU("embedding_bag_cuda", weight_arg, indices_arg);
checkSameGPU("embedding_bag_cuda", weight_arg, offsets_arg);
int64_t numIndices = indices.size(0);
int64_t numBags = offsets.size(0);
if (include_last_offset) {
// Check https://github.com/pytorch/pytorch/issues/29019
// We plan to add one more element in offsets, which is equal to the size of
// indices. Currently for cuda devices, we still use the legacy
// implementation even when this flag is enabled.
TORCH_CHECK(
numBags >= 1, "include_last_offset: numBags should be at least 1");
numBags -= 1;
}
int64_t featureSize = weight.size(1);
auto bag_size = at::empty(offsets.sizes(), indices.options());
auto offset2bag =
at::empty({indices.size(0)}, indices.options()); // offset2bag = [0 0 0 0 0]
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto output = at::empty({numBags, featureSize}, weight.options());
Tensor max_indices;
if (mode == MODE_MAX) {
max_indices = at::empty({numBags, featureSize}, indices.options());
} else {
// No need to allocate if we aren't doing a backwards pass
max_indices = at::empty({0}, indices.options());
}
#if defined(USE_ROCM)
dim3 block = dim3(64, 4);
#else
dim3 block = dim3(32, 8);
#endif
int grid = 1024;
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, weight.scalar_type(), "embedding_bag_cuda", [&] {
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_bag_cuda", [&] () {
if (mode == MODE_MAX) {
hipLaunchKernelGGL(( EmbeddingBag_updateOutputKernel_max<scalar_t, index_t>), dim3(grid), dim3(block), 0, stream,
indices.data_ptr<index_t>(), offsets.data_ptr<index_t>(),
weight.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
offset2bag.data_ptr<index_t>(), numIndices, numBags, featureSize,
weight.stride(0), weight.stride(1), bag_size.data_ptr<index_t>(),
max_indices.data_ptr<index_t>(),
padding_idx);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
hipLaunchKernelGGL(( EmbeddingBag_updateOutputKernel_sum_mean<scalar_t, index_t>), dim3(grid), dim3(block), 0, stream,
indices.data_ptr<index_t>(), offsets.data_ptr<index_t>(),
weight.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
offset2bag.data_ptr<index_t>(), numIndices, numBags, featureSize,
weight.stride(0), weight.stride(1), mode, bag_size.data_ptr<index_t>(),
per_sample_weights.defined() ? per_sample_weights.data_ptr<scalar_t>() : NULL,
per_sample_weights.defined() ? per_sample_weights.stride(0) : 0,
padding_idx);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
});
});
return std::tuple<Tensor, Tensor, Tensor, Tensor>(output, offset2bag, bag_size, max_indices);
}
Tensor _embedding_bag_dense_backward_cuda(const Tensor &grad_, const Tensor &indices,
const Tensor &offset2bag,
const Tensor &bag_size_,
const Tensor &max_indices,
int64_t num_weights,
bool scale_grad_by_freq, int64_t mode, const c10::optional<Tensor>& per_sample_weights_opt,
int64_t padding_idx) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt);
const Tensor& per_sample_weights = *per_sample_weights_maybe_owned;
// indices, offsets and offset2bag are assumed having correct dtypes and
// contiguous here due to the checks in _embedding_bag_backward in
// EmbeddingBag.cpp.
// Also see NOTE [ embedding_bag Native Functions ] in native_functions.yaml
// for more details.
Tensor grad = grad_.contiguous();
auto indices_arg = TensorArg(indices, "indices", 1);
auto grad_arg = TensorArg(grad, "grad", 1);
checkSameGPU("embedding_bag_cuda", grad_arg, indices_arg);
switch (mode) {
case MODE_SUM:
case MODE_MEAN:
if (mode == MODE_MEAN)
AT_ASSERT(!per_sample_weights.defined());
return embedding_bag_backward_cuda_sum_avg(grad, indices, offset2bag,
bag_size_, num_weights, scale_grad_by_freq, mode,
per_sample_weights, padding_idx);
case MODE_MAX:
AT_ASSERT(!per_sample_weights.defined());
return embedding_bag_backward_cuda_max(grad, max_indices, num_weights,
padding_idx);
default:
AT_ERROR(
"Unknown mode for embedding_bag_backward_cuda ", mode);
}
}
template <typename scalar_t>
__inline__ __device__
static scalar_t warpReduceSum(scalar_t val) {
for (int offset = C10_WARP_SIZE/2; offset > 0; offset /= 2)
val += WARP_SHFL_DOWN(val, offset);
return val;
}
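// Illustrative example: with C10_WARP_SIZE = 32 the loop uses shuffle offsets
// 16, 8, 4, 2, 1, folding the 32 lane values into lane 0 in five steps; only
// lane 0 holds the complete sum afterwards.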
template <typename scalar_t, typename index_t>
__global__ static void _embedding_bag_per_sample_weights_backward_kernel(
const scalar_t* grad, int64_t grad_stride0, int64_t grad_stride1,
const scalar_t* weight, int64_t weight_stride0, int64_t weight_stride1,
const index_t* indices, // contiguous
const index_t* offset2bag, // contiguous
int64_t num_samples,
int64_t embedding_features,
scalar_t* output,
index_t padding_idx) {
using accscalar_t = acc_type<scalar_t, true>;
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
const int warp = idx / C10_WARP_SIZE;
const int thread_in_warp = idx % C10_WARP_SIZE;
const int num_warps = blockDim.x * gridDim.x / C10_WARP_SIZE;
// Each warp is responsible for the accumulation of one sample.
// This involves doing one dot product between grad[bag_idx] and weight[embedding_idx].
for (int sample_idx = warp; sample_idx < num_samples; sample_idx += num_warps) {
accscalar_t result = 0.;
const int bag_idx = (int)offset2bag[sample_idx];
const int embedding_idx = (int)indices[sample_idx];
if (embedding_idx != padding_idx) {
for (int feature_idx = thread_in_warp; feature_idx < embedding_features;
feature_idx += C10_WARP_SIZE) {
result +=
grad[grad_stride0 * bag_idx + grad_stride1 * feature_idx] *
weight[weight_stride0 * embedding_idx + weight_stride1 * feature_idx];
}
}
result = warpReduceSum<accscalar_t>(result);
if (thread_in_warp == 0) {
output[sample_idx] = result;
}
}
}
Tensor _embedding_bag_per_sample_weights_backward_cuda(
const Tensor& grad,
const Tensor& weight, // NB: embedding table, not per_sample_weights
const Tensor& indices_,
const Tensor& offsets_,
const Tensor& offset2bag,
int64_t mode,
int64_t padding_idx) {
TORCH_CHECK(
mode == MODE_SUM,
"embedding_bag_backward: per_sample_weights only supported for mode='sum'");
AT_ASSERT(grad.dim() == 2);
auto embedding_features = grad.size(1);
Tensor indices, offsets;
std::tie(indices, offsets) = promoteIndicesAndOffsets(indices_, offsets_);
AT_ASSERT(indices.dim() == 1);
auto num_samples = indices.size(0);
AT_ASSERT(weight.dim() == 2);
AT_ASSERT(weight.size(1) == embedding_features);
const int threads_per_block = 512;
const int warps_per_block = threads_per_block / at::cuda::warp_size();
dim3 block(threads_per_block);
dim3 grid((num_samples + warps_per_block - 1) / warps_per_block);
auto output = at::empty({num_samples}, grad.options());
// Early return when there are no samples in the batch. This saves an unnecessary kernel
// launch and also prevents hipGetLastError() from complaining about invalid launch args
if (num_samples == 0) {
return output;
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "_embedding_bag_per_sample_weights_backward_cuda", [&]() {
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "_embedding_bag_per_sample_weights_backward_cuda", [&]() {
hipLaunchKernelGGL(( _embedding_bag_per_sample_weights_backward_kernel<scalar_t, index_t>)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad.data_ptr<scalar_t>(), grad.stride(0), grad.stride(1),
weight.data_ptr<scalar_t>(), weight.stride(0), weight.stride(1),
indices.data_ptr<index_t>(),
offset2bag.data_ptr<index_t>(),
num_samples,
embedding_features,
output.data_ptr<scalar_t>(),
padding_idx);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
);
return output;
}
}
}
| 45462379dd9bec34d89e72e745c617ef365cb937.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/DeviceUtils.cuh>
#include <ATen/TensorUtils.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/arange.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/zeros.h>
#include <ATen/ops/_embedding_bag_native.h>
#include <ATen/ops/_embedding_bag_forward_only_native.h>
#include <ATen/ops/_embedding_bag_dense_backward_native.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_native.h>
#endif
#include <ATen/cuda/cub.cuh>
#include <ATen/native/cuda/SortingCommon.cuh>
#include <ATen/native/cuda/EmbeddingBackwardKernel.cuh>
#include <ATen/native/cuda/KernelUtils.cuh>
#include <c10/macros/Macros.h>
#if CUB_SUPPORTS_SCAN_BY_KEY()
#include <thrust/iterator/reverse_iterator.h>
#endif
namespace at {
namespace native {
#if !CUB_SUPPORTS_SCAN_BY_KEY()
template<typename index_t>
void embedding_dense_backward_cuda_scan(Tensor &sorted_indices, Tensor &count);
#endif
namespace {
constexpr int MODE_SUM = 0;
constexpr int MODE_MEAN = 1;
constexpr int MODE_MAX = 2;
std::pair<Tensor, Tensor> promoteIndicesAndOffsets(
const Tensor& indices,
const Tensor& offsets) {
const auto commonType =
promoteTypes(offsets.scalar_type(), indices.scalar_type());
return {
indices.scalar_type() == commonType ? indices
: indices.toType(commonType),
offsets.scalar_type() == commonType ? offsets
: offsets.toType(commonType)};
}
// This kernel assumes that all input tensors except `weight` and
// per_sample_weights are contiguous.
template <typename scalar_t, typename index_t>
__global__ void EmbeddingBag_updateOutputKernel_max(
index_t *input, index_t *offsets, scalar_t *weight, scalar_t *output,
index_t *offset2bag, int64_t numIndices, int64_t numBags,
int64_t featureSize, int64_t weight_stride0, int64_t weight_stride1,
index_t *bag_size, index_t *max_indices,
index_t padding_idx) {
// the strategy here is that each bag x feature is handled by a single thread
int64_t chunksPerBag = ceil_div(featureSize, (int64_t)blockDim.x);
int64_t numChunks = numBags * chunksPerBag;
int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y;
int64_t chunkStride = gridDim.x * blockDim.y;
for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) {
int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x;
if (featureDim < featureSize) {
int64_t bag = chunk / chunksPerBag;
scalar_t *weightFeat = weight + featureDim * weight_stride1;
int64_t begin = bag == 0 ? 0 : offsets[bag]; // forces first offset to be 0 instead of asserting on it
int64_t end = (bag < numBags - 1) ? (offsets[bag + 1]) : numIndices;
CUDA_KERNEL_ASSERT(end >= begin);
scalar_t weightFeatMax = 0;
int64_t bag_size_ = 0;
int64_t maxWord = -1;
for (int64_t emb = begin; emb < end; emb++) {
bool pad = (input[emb] == padding_idx);
const int64_t weightRow = input[emb] * weight_stride0;
scalar_t weightValue = weightFeat[weightRow];
if (bag_size_ == 0 || weightValue > weightFeatMax) {
weightFeatMax = pad ? weightFeatMax : weightValue;
maxWord = pad ? maxWord : input[emb];
}
bag_size_ += pad ? 0 : 1;
if (featureDim == 0) {
offset2bag[emb] = bag;
}
}
bag_size[bag] = bag_size_;
max_indices[bag * featureSize + featureDim] = maxWord;
output[bag * featureSize + featureDim] = weightFeatMax;
}
}
}
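// Illustrative note: rows equal to padding_idx contribute neither to the
// running max nor to bag_size above; a bag containing only padding keeps
// maxWord = -1, which the max backward kernel skips.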
// This kernel assumes that all input tensors except `weight` and
// per_sample_weights are contiguous.
template <typename scalar_t, typename index_t>
__global__ void EmbeddingBag_updateOutputKernel_sum_mean(
index_t *input, index_t *offsets, scalar_t *weight, scalar_t *output,
index_t *offset2bag, int64_t numIndices, int64_t numBags,
int64_t featureSize, int64_t weight_stride0, int64_t weight_stride1,
int mode, index_t *bag_size,
scalar_t* per_sample_weights, int64_t per_sample_weights_stride,
index_t padding_idx) {
// the strategy here is that each bag x feature is handled by a single thread
using accscalar_t = acc_type<scalar_t, true>;
int64_t chunksPerBag = ceil_div(featureSize, (int64_t)blockDim.x);
int64_t numChunks = numBags * chunksPerBag;
int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y;
int64_t chunkStride = gridDim.x * blockDim.y;
for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) {
int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x;
if (featureDim < featureSize) {
int64_t bag = chunk / chunksPerBag;
scalar_t *weightFeat = weight + featureDim * weight_stride1;
int64_t begin = bag == 0 ? 0 : offsets[bag]; // forces first offset to be 0 instead of asserting on it
int64_t end = (bag < numBags - 1) ? (offsets[bag + 1]) : numIndices;
CUDA_KERNEL_ASSERT(end >= begin);
accscalar_t weightFeatSum = 0;
int64_t bag_size_ = 0;
for (int64_t emb = begin; emb < end; emb++) {
bool pad = (input[emb] == padding_idx);
const int64_t weightRow = input[emb] * weight_stride0;
scalar_t weightValue = weightFeat[weightRow];
weightValue = pad ? static_cast<scalar_t>(0) : weightValue;
if (per_sample_weights) {
accscalar_t scaleWeightBy = static_cast<accscalar_t>(
per_sample_weights[emb * per_sample_weights_stride]);
weightFeatSum += scaleWeightBy * static_cast<accscalar_t>(weightValue);
} else {
weightFeatSum += static_cast<accscalar_t>(weightValue);
}
bag_size_ += pad ? 0 : 1;
if (featureDim == 0) {
offset2bag[emb] = bag;
}
}
if (mode == MODE_MEAN) {
if (bag_size_ != 0) {
weightFeatSum = weightFeatSum / static_cast<accscalar_t>(bag_size_);
}
}
bag_size[bag] = bag_size_;
output[bag * featureSize + featureDim] = static_cast<scalar_t>(weightFeatSum);
}
}
}
Tensor embedding_bag_backward_cuda_sum_avg(
const Tensor &grad,
const Tensor &indices_,
const Tensor &offset2bag,
const Tensor &bag_size,
int64_t num_weights,
bool scale_grad_by_freq, int64_t mode,
const Tensor& per_sample_weights,
int64_t padding_idx) {
auto indices = indices_.contiguous();
ptrdiff_t num_indices = indices.numel();
if (num_indices == 0) {
// all empty bags
return at::zeros({num_weights, grad.size(1)}, grad.options());
}
auto sorted_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor count;
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_bag_backward_cuda_sum_avg", [&] () {
auto range = at::arange(num_indices, indices.options());
// int64_t nbits = cuda::cub::get_num_bits(num_weights);
cuda::cub::radix_sort_pairs(
indices.data_ptr<index_t>(), sorted_indices.data_ptr<index_t>(),
range.data_ptr<index_t>(), orig_indices.data_ptr<index_t>(),
num_indices, false/*, 0, nbits*/);
});
if (scale_grad_by_freq) {
count = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
#if CUB_SUPPORTS_SCAN_BY_KEY()
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_bag_backward_cuda_sum_avg", [&] () {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = sorted_indices.data_ptr<index_t>();
auto count_data = count.data_ptr<index_t>();
cuda::cub::inclusive_sum_by_key(
sorted_data,
at_cuda_detail::cub::ConstantInputIterator<index_t>(1),
count_data,
num_indices
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
cuda::cub::inclusive_scan_by_key(
thrust::make_reverse_iterator(sorted_data + num_indices),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::make_reverse_iterator(count_data + num_indices),
at_cuda_detail::cub::Max(),
num_indices
);
});
#else
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_bag_backward_cuda_sum_avg", [&] () {
embedding_dense_backward_cuda_scan<index_t>(sorted_indices, count);
});
#endif
}
return embedding_backward_cuda_kernel(grad, orig_indices, sorted_indices,
count, num_weights, padding_idx, mode == MODE_MEAN, offset2bag,
bag_size, per_sample_weights);
}
template <typename scalar_t, typename index_t>
__global__ void EmbeddingBag_accGradParametersKernel_max(
index_t *max_indices, scalar_t *gradOutput,
scalar_t *gradWeight, int64_t stride, int64_t numBags,
index_t padding_idx, const index_t numel) {
using accscalar_t = acc_type<scalar_t, true>;
int64_t chunksPerBag = ceil_div(stride, (int64_t)blockDim.x);
int64_t numChunks = numBags * chunksPerBag;
int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y;
int64_t chunkStride = gridDim.x * blockDim.y;
for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) {
int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x;
if (featureDim < stride) {
int64_t bag = chunk / chunksPerBag;
index_t word_idx = max_indices[bag * stride + featureDim];
if (word_idx >= 0 && word_idx != padding_idx) {
// If bag is empty, we have max_indices[idx] set to -1 in forward.
fastAtomicAdd(
gradWeight, static_cast<index_t>(word_idx * stride + featureDim),
numel, gradOutput[bag * stride + featureDim], true);
}
}
}
}
Tensor embedding_bag_backward_cuda_max(const Tensor &grad,
const Tensor &max_indices,
int64_t num_weights,
int64_t padding_idx) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("embedding_bag_backward_cuda_max");
auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.options());
int64_t stride = grad_weight.stride(0);
int64_t numBags = grad.size(0);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
#if defined(USE_ROCM)
dim3 block = dim3(64, 4);
#else
dim3 block = dim3(32, 8);
#endif
int grid = 1024;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "embedding_bag_backward_cuda_max", [&] {
AT_DISPATCH_INDEX_TYPES(max_indices.scalar_type(), "embedding_bag_backward_cuda_max", [&] () {
EmbeddingBag_accGradParametersKernel_max<
scalar_t, index_t><<<grid, block, 0, stream>>>(
max_indices.data_ptr<index_t>(), grad.data_ptr<scalar_t>(),
grad_weight.data_ptr<scalar_t>(), stride, numBags,
padding_idx, grad_weight.numel());
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
return grad_weight;
}
}
// Assumes all input tensors are contiguous.
// See NOTE [ embedding_bag Native Functions ] in native_functions.yaml for details
std::tuple<Tensor, Tensor, Tensor, Tensor>
_embedding_bag_forward_only_cuda(const Tensor &weight, const Tensor &indices,
const Tensor &offsets, const bool scale_grad_by_freq,
const int64_t mode, bool sparse, const c10::optional<Tensor>& per_sample_weights_opt,
bool include_last_offset, int64_t padding_idx) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt);
const Tensor& per_sample_weights = *per_sample_weights_maybe_owned;
return _embedding_bag_cuda(
weight,
indices,
offsets,
scale_grad_by_freq,
mode,
sparse,
per_sample_weights,
include_last_offset,
padding_idx);
}
// Assumes all input tensors are contiguous.
// See NOTE [ embedding_bag Native Functions ] in native_functions.yaml for details
std::tuple<Tensor, Tensor, Tensor, Tensor>
_embedding_bag_cuda(const Tensor &weight, const Tensor &indices_,
const Tensor &offsets_, const bool scale_grad_by_freq,
const int64_t mode, bool sparse, const c10::optional<Tensor>& per_sample_weights_opt,
bool include_last_offset, int64_t padding_idx) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt);
const Tensor& per_sample_weights = *per_sample_weights_maybe_owned;
Tensor indices, offsets;
std::tie(indices, offsets) = promoteIndicesAndOffsets(indices_, offsets_);
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarTypes("embedding_bag_cuda", indices_arg, {kLong, kInt});
auto offsets_arg = TensorArg(offsets, "offsets", 1);
checkScalarTypes("embedding_bag_cuda", offsets_arg, {kLong, kInt});
checkSameType("embedding_bag_cuda", indices_arg, offsets_arg);
auto weight_arg = TensorArg(weight, "weight", 1);
checkSameGPU("embedding_bag_cuda", weight_arg, indices_arg);
checkSameGPU("embedding_bag_cuda", weight_arg, offsets_arg);
int64_t numIndices = indices.size(0);
int64_t numBags = offsets.size(0);
if (include_last_offset) {
// Check https://github.com/pytorch/pytorch/issues/29019
// We plan to add one more element in offsets, which is equal to the size of
// indices. Currently for cuda devices, we still use the legacy
  // implementation even if this flag is enabled.
TORCH_CHECK(
numBags >= 1, "include_last_offset: numBags should be at least 1");
numBags -= 1;
}
int64_t featureSize = weight.size(1);
auto bag_size = at::empty(offsets.sizes(), indices.options());
auto offset2bag =
at::empty({indices.size(0)}, indices.options()); // offset2bag = [0 0 0 0 0]
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto output = at::empty({numBags, featureSize}, weight.options());
Tensor max_indices;
if (mode == MODE_MAX) {
max_indices = at::empty({numBags, featureSize}, indices.options());
} else {
// No need to allocate if we aren't doing a backwards pass
max_indices = at::empty({0}, indices.options());
}
#if defined(USE_ROCM)
dim3 block = dim3(64, 4);
#else
dim3 block = dim3(32, 8);
#endif
int grid = 1024;
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, weight.scalar_type(), "embedding_bag_cuda", [&] {
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_bag_cuda", [&] () {
if (mode == MODE_MAX) {
EmbeddingBag_updateOutputKernel_max<scalar_t, index_t><<<grid, block, 0, stream>>>(
indices.data_ptr<index_t>(), offsets.data_ptr<index_t>(),
weight.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
offset2bag.data_ptr<index_t>(), numIndices, numBags, featureSize,
weight.stride(0), weight.stride(1), bag_size.data_ptr<index_t>(),
max_indices.data_ptr<index_t>(),
padding_idx);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
EmbeddingBag_updateOutputKernel_sum_mean<scalar_t, index_t><<<grid, block, 0, stream>>>(
indices.data_ptr<index_t>(), offsets.data_ptr<index_t>(),
weight.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
offset2bag.data_ptr<index_t>(), numIndices, numBags, featureSize,
weight.stride(0), weight.stride(1), mode, bag_size.data_ptr<index_t>(),
per_sample_weights.defined() ? per_sample_weights.data_ptr<scalar_t>() : NULL,
per_sample_weights.defined() ? per_sample_weights.stride(0) : 0,
padding_idx);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
});
});
return std::tuple<Tensor, Tensor, Tensor, Tensor>(output, offset2bag, bag_size, max_indices);
}
Tensor _embedding_bag_dense_backward_cuda(const Tensor &grad_, const Tensor &indices,
const Tensor &offset2bag,
const Tensor &bag_size_,
const Tensor &max_indices,
int64_t num_weights,
bool scale_grad_by_freq, int64_t mode, const c10::optional<Tensor>& per_sample_weights_opt,
int64_t padding_idx) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt);
const Tensor& per_sample_weights = *per_sample_weights_maybe_owned;
// indices, offsets and offset2bag are assumed having correct dtypes and
// contiguous here due to the checks in _embedding_bag_backward in
// EmbeddingBag.cpp.
// Also see NOTE [ embedding_bag Native Functions ] in native_functions.yaml
// for more details.
Tensor grad = grad_.contiguous();
auto indices_arg = TensorArg(indices, "indices", 1);
auto grad_arg = TensorArg(grad, "grad", 1);
checkSameGPU("embedding_bag_cuda", grad_arg, indices_arg);
switch (mode) {
case MODE_SUM:
case MODE_MEAN:
if (mode == MODE_MEAN)
AT_ASSERT(!per_sample_weights.defined());
return embedding_bag_backward_cuda_sum_avg(grad, indices, offset2bag,
bag_size_, num_weights, scale_grad_by_freq, mode,
per_sample_weights, padding_idx);
case MODE_MAX:
AT_ASSERT(!per_sample_weights.defined());
return embedding_bag_backward_cuda_max(grad, max_indices, num_weights,
padding_idx);
default:
AT_ERROR(
"Unknown mode for embedding_bag_backward_cuda ", mode);
}
}
template <typename scalar_t>
__inline__ __device__
static scalar_t warpReduceSum(scalar_t val) {
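  // Shuffle-down tree reduction: with a 32-lane warp the offsets are 16, 8, 4, 2, 1,
  // and after the loop lane 0 holds the sum of all lanes' inputs (other lanes hold
  // partial sums), so callers read the result from lane 0 only.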
for (int offset = C10_WARP_SIZE/2; offset > 0; offset /= 2)
val += WARP_SHFL_DOWN(val, offset);
return val;
}
template <typename scalar_t, typename index_t>
__global__ static void _embedding_bag_per_sample_weights_backward_kernel(
const scalar_t* grad, int64_t grad_stride0, int64_t grad_stride1,
const scalar_t* weight, int64_t weight_stride0, int64_t weight_stride1,
const index_t* indices, // contiguous
const index_t* offset2bag, // contiguous
int64_t num_samples,
int64_t embedding_features,
scalar_t* output,
index_t padding_idx) {
using accscalar_t = acc_type<scalar_t, true>;
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
const int warp = idx / C10_WARP_SIZE;
const int thread_in_warp = idx % C10_WARP_SIZE;
const int num_warps = blockDim.x * gridDim.x / C10_WARP_SIZE;
// Each warp is responsible for the accumulation of one sample.
// This involves doing one dot product between grad[bag_idx] and weight[embedding_idx].
for (int sample_idx = warp; sample_idx < num_samples; sample_idx += num_warps) {
accscalar_t result = 0.;
const int bag_idx = (int)offset2bag[sample_idx];
const int embedding_idx = (int)indices[sample_idx];
if (embedding_idx != padding_idx) {
for (int feature_idx = thread_in_warp; feature_idx < embedding_features;
feature_idx += C10_WARP_SIZE) {
result +=
grad[grad_stride0 * bag_idx + grad_stride1 * feature_idx] *
weight[weight_stride0 * embedding_idx + weight_stride1 * feature_idx];
}
}
result = warpReduceSum<accscalar_t>(result);
if (thread_in_warp == 0) {
output[sample_idx] = result;
}
}
}
Tensor _embedding_bag_per_sample_weights_backward_cuda(
const Tensor& grad,
const Tensor& weight, // NB: embedding table, not per_sample_weights
const Tensor& indices_,
const Tensor& offsets_,
const Tensor& offset2bag,
int64_t mode,
int64_t padding_idx) {
TORCH_CHECK(
mode == MODE_SUM,
"embedding_bag_backward: per_sample_weights only supported for mode='sum'");
AT_ASSERT(grad.dim() == 2);
auto embedding_features = grad.size(1);
Tensor indices, offsets;
std::tie(indices, offsets) = promoteIndicesAndOffsets(indices_, offsets_);
AT_ASSERT(indices.dim() == 1);
auto num_samples = indices.size(0);
AT_ASSERT(weight.dim() == 2);
AT_ASSERT(weight.size(1) == embedding_features);
const int threads_per_block = 512;
const int warps_per_block = threads_per_block / at::cuda::warp_size();
dim3 block(threads_per_block);
dim3 grid((num_samples + warps_per_block - 1) / warps_per_block);
auto output = at::empty({num_samples}, grad.options());
  // Early return when there are no samples in the batch. This saves an unnecessary kernel
  // launch, and also prevents cudaGetLastError() from complaining about invalid launch args
if (num_samples == 0) {
return output;
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "_embedding_bag_per_sample_weights_backward_cuda", [&]() {
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "_embedding_bag_per_sample_weights_backward_cuda", [&]() {
_embedding_bag_per_sample_weights_backward_kernel<scalar_t, index_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
grad.data_ptr<scalar_t>(), grad.stride(0), grad.stride(1),
weight.data_ptr<scalar_t>(), weight.stride(0), weight.stride(1),
indices.data_ptr<index_t>(),
offset2bag.data_ptr<index_t>(),
num_samples,
embedding_features,
output.data_ptr<scalar_t>(),
padding_idx);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
);
return output;
}
}
}
|
48ce13e269ba8706bc4567b2ede4f647e2c17015.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "helper.hpp"
__global__ void s2g_gpu_scatter_kernel(uint32_t *in, uint32_t *out, int len) {
//@@ INSERT KERNEL CODE HERE
}
static void s2g_cpu_scatter(uint32_t *in, uint32_t *out, int len) {
for (int inIdx = 0; inIdx < len; ++inIdx) {
uint32_t intermediate = outInvariant(in[inIdx]);
for (int outIdx = 0; outIdx < len; ++outIdx) {
out[outIdx] += outDependent(intermediate, inIdx, outIdx);
}
}
}
static void s2g_gpu_scatter(uint32_t *in, uint32_t *out, int len) {
//@@ INSERT CODE HERE
}
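// --- Illustrative sketch (not part of the original exercise skeleton) ---
// One straightforward way to fill in the stubs above: map one thread to each
// input index and let it scatter into every output slot. Because many threads
// add into the same out[outIdx] concurrently, the accumulation must use
// atomicAdd. This assumes outInvariant()/outDependent() from helper.hpp are
// callable from device code; the *_sketch names are hypothetical.
__global__ void s2g_gpu_scatter_kernel_sketch(uint32_t *in, uint32_t *out, int len) {
  int inIdx = blockIdx.x * blockDim.x + threadIdx.x;
  if (inIdx >= len) return;
  uint32_t intermediate = outInvariant(in[inIdx]);
  for (int outIdx = 0; outIdx < len; ++outIdx) {
    atomicAdd(&out[outIdx], outDependent(intermediate, inIdx, outIdx));
  }
}
static void s2g_gpu_scatter_sketch(uint32_t *in, uint32_t *out, int len) {
  const int threadsPerBlock = 256;
  const int blocks = (len + threadsPerBlock - 1) / threadsPerBlock;
  hipLaunchKernelGGL(s2g_gpu_scatter_kernel_sketch, dim3(blocks), dim3(threadsPerBlock), 0, 0,
                     in, out, len);
}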
static int eval(int inputLength) {
uint32_t *deviceInput = nullptr;
uint32_t *deviceOutput= nullptr;
const std::string conf_info =
std::string("scatter[len:") + std::to_string(inputLength) + "]";
INFO("Running " << conf_info);
auto hostInput = generate_input(inputLength);
const size_t byteCount = inputLength * sizeof(uint32_t);
timer_start("Allocating GPU memory.");
THROW_IF_ERROR(hipMalloc((void **)&deviceInput, byteCount));
THROW_IF_ERROR(hipMalloc((void **)&deviceOutput, byteCount));
timer_stop();
timer_start("Copying input memory to the GPU.");
THROW_IF_ERROR(hipMemcpy(deviceInput, hostInput.data(), byteCount,
hipMemcpyHostToDevice));
THROW_IF_ERROR(hipMemset(deviceOutput, 0, byteCount));
timer_stop();
//////////////////////////////////////////
// GPU Scatter Computation
//////////////////////////////////////////
timer_start( "Performing GPU Scatter computation");
s2g_gpu_scatter(deviceInput, deviceOutput, inputLength);
timer_stop();
std::vector<uint32_t> hostOutput(inputLength);
timer_start( "Copying output memory to the CPU");
THROW_IF_ERROR(hipMemcpy(hostOutput.data(), deviceOutput, byteCount,
hipMemcpyDeviceToHost));
timer_stop();
auto expected = compute_output(hostInput, inputLength);
verify(expected, hostOutput);
hipFree(deviceInput);
hipFree(deviceOutput);
return 0;
}
TEST_CASE("Scatter", "[scatter]") {
SECTION("[inputSize:1024]") {
eval(1024);
}
SECTION("[inputSize:2048]") {
eval(2048);
}
SECTION("[inputSize:2047]") {
eval(2047);
}
SECTION("[inputSize:2049]") {
eval(2049);
}
SECTION("[inputSize:9101]") {
eval(9101);
}
SECTION("[inputSize:9910]") {
eval(9910);
}
SECTION("[inputSize:8192]") {
eval(8192);
}
SECTION("[inputSize:8193]") {
eval(8193);
}
SECTION("[inputSize:8191]") {
eval(8191);
}
SECTION("[inputSize:16191]") {
eval(16191);
}
}
| 48ce13e269ba8706bc4567b2ede4f647e2c17015.cu | #include "helper.hpp"
__global__ void s2g_gpu_scatter_kernel(uint32_t *in, uint32_t *out, int len) {
//@@ INSERT KERNEL CODE HERE
}
static void s2g_cpu_scatter(uint32_t *in, uint32_t *out, int len) {
for (int inIdx = 0; inIdx < len; ++inIdx) {
uint32_t intermediate = outInvariant(in[inIdx]);
for (int outIdx = 0; outIdx < len; ++outIdx) {
out[outIdx] += outDependent(intermediate, inIdx, outIdx);
}
}
}
static void s2g_gpu_scatter(uint32_t *in, uint32_t *out, int len) {
//@@ INSERT CODE HERE
}
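// Note (outside the original skeleton): a straightforward way to fill in the two
// stubs above is one thread per input index, accumulating into out[outIdx] with
// atomicAdd, since many threads write the same output element concurrently.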
static int eval(int inputLength) {
uint32_t *deviceInput = nullptr;
uint32_t *deviceOutput= nullptr;
const std::string conf_info =
std::string("scatter[len:") + std::to_string(inputLength) + "]";
INFO("Running " << conf_info);
auto hostInput = generate_input(inputLength);
const size_t byteCount = inputLength * sizeof(uint32_t);
timer_start("Allocating GPU memory.");
THROW_IF_ERROR(cudaMalloc((void **)&deviceInput, byteCount));
THROW_IF_ERROR(cudaMalloc((void **)&deviceOutput, byteCount));
timer_stop();
timer_start("Copying input memory to the GPU.");
THROW_IF_ERROR(cudaMemcpy(deviceInput, hostInput.data(), byteCount,
cudaMemcpyHostToDevice));
THROW_IF_ERROR(cudaMemset(deviceOutput, 0, byteCount));
timer_stop();
//////////////////////////////////////////
// GPU Scatter Computation
//////////////////////////////////////////
timer_start( "Performing GPU Scatter computation");
s2g_gpu_scatter(deviceInput, deviceOutput, inputLength);
timer_stop();
std::vector<uint32_t> hostOutput(inputLength);
timer_start( "Copying output memory to the CPU");
THROW_IF_ERROR(cudaMemcpy(hostOutput.data(), deviceOutput, byteCount,
cudaMemcpyDeviceToHost));
timer_stop();
auto expected = compute_output(hostInput, inputLength);
verify(expected, hostOutput);
cudaFree(deviceInput);
cudaFree(deviceOutput);
return 0;
}
TEST_CASE("Scatter", "[scatter]") {
SECTION("[inputSize:1024]") {
eval(1024);
}
SECTION("[inputSize:2048]") {
eval(2048);
}
SECTION("[inputSize:2047]") {
eval(2047);
}
SECTION("[inputSize:2049]") {
eval(2049);
}
SECTION("[inputSize:9101]") {
eval(9101);
}
SECTION("[inputSize:9910]") {
eval(9910);
}
SECTION("[inputSize:8192]") {
eval(8192);
}
SECTION("[inputSize:8193]") {
eval(8193);
}
SECTION("[inputSize:8191]") {
eval(8191);
}
SECTION("[inputSize:16191]") {
eval(16191);
}
}
|
1eb7614d155124245f7a1de6df95da7de4060ba1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include <vector>
#include "modules/perception/inference/tensorrt/plugins/slice_plugin.h"
namespace apollo {
namespace perception {
namespace inference {
typedef int8_t int8;
template <typename Dtype>
__global__ void Slice(const int nthreads, const Dtype *in_data,
const int num_slices, const int slice_size,
const int bottom_slice_axis, const int top_slice_axis,
const int offset_slice_axis, Dtype *out_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
const int total_slice_size = slice_size * top_slice_axis;
const int slice_num = index / total_slice_size;
const int slice_index = index % total_slice_size;
const int bottom_index =
slice_index +
(slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;
out_data[index] = in_data[bottom_index];
}
}
int SLICEPlugin::enqueue(int batchSize, const void *const *inputs,
void **outputs, void *workspace, hipStream_t stream) {
int slice_size = 1;
for (size_t index = axis_ + 1; index < input_dims_.nbDims; ++index) {
slice_size *= input_dims_.d[index];
}
int num_slices = batchSize;
for (size_t index = 0; index < axis_; ++index) {
num_slices *= input_dims_.d[index];
}
int offset_slice_axis = 0;
for (int i = 0; i < out_slice_dims_.size(); ++i) {
const int top_slice_axis = out_slice_dims_[i];
const int top_slice_size = top_slice_axis * slice_size;
const int nthreads = top_slice_size * num_slices;
const int block_num = (nthreads + 511) / 512;
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( Slice), dim3(block_num), dim3(512), 0, stream,
nthreads, (const float *)(inputs[0]), num_slices, slice_size,
input_dims_.d[axis_], top_slice_axis, offset_slice_axis,
reinterpret_cast<float *>(outputs[i]));
offset_slice_axis += top_slice_axis;
}
return 1;
}
} // namespace inference
} // namespace perception
} // namespace apollo
| 1eb7614d155124245f7a1de6df95da7de4060ba1.cu | /******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include <vector>
#include "modules/perception/inference/tensorrt/plugins/slice_plugin.h"
namespace apollo {
namespace perception {
namespace inference {
typedef int8_t int8;
template <typename Dtype>
__global__ void Slice(const int nthreads, const Dtype *in_data,
const int num_slices, const int slice_size,
const int bottom_slice_axis, const int top_slice_axis,
const int offset_slice_axis, Dtype *out_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
const int total_slice_size = slice_size * top_slice_axis;
const int slice_num = index / total_slice_size;
const int slice_index = index % total_slice_size;
const int bottom_index =
slice_index +
(slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;
out_data[index] = in_data[bottom_index];
}
}
int SLICEPlugin::enqueue(int batchSize, const void *const *inputs,
void **outputs, void *workspace, cudaStream_t stream) {
int slice_size = 1;
for (size_t index = axis_ + 1; index < input_dims_.nbDims; ++index) {
slice_size *= input_dims_.d[index];
}
int num_slices = batchSize;
for (size_t index = 0; index < axis_; ++index) {
num_slices *= input_dims_.d[index];
}
int offset_slice_axis = 0;
for (int i = 0; i < out_slice_dims_.size(); ++i) {
const int top_slice_axis = out_slice_dims_[i];
const int top_slice_size = top_slice_axis * slice_size;
const int nthreads = top_slice_size * num_slices;
const int block_num = (nthreads + 511) / 512;
Slice // NOLINT_NEXT_LINE(whitespace/operators)
<<<block_num, 512, 0, stream>>>(
nthreads, (const float *)(inputs[0]), num_slices, slice_size,
input_dims_.d[axis_], top_slice_axis, offset_slice_axis,
reinterpret_cast<float *>(outputs[i]));
offset_slice_axis += top_slice_axis;
}
return 1;
}
} // namespace inference
} // namespace perception
} // namespace apollo
|
6fc02fe2abe77e64d8a499d2f16337319ab811ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "BinaryPlugin.hpp"
namespace MNN {
template <typename T>
__global__ void ADD(const int n, const T* in0, const T* in1, T* out, int s0, int s1){
CUDA_KERNEL_LOOP(index, n) {
out[index] = in0[index * s0] + in1[index * s1];
}
}
template <typename T>
__global__ void SUB(const int n, const T* in0, const T* in1, T* out, int s0, int s1){
CUDA_KERNEL_LOOP(index, n) {
out[index] = in0[index * s0] - in1[index * s1];
}
}
template <typename T>
__global__ void MUL(const int n, const T* in0, const T* in1, T* out, int s0, int s1){
CUDA_KERNEL_LOOP(index, n) {
out[index] = in0[index * s0] * in1[index * s1];
}
}
template <typename T>
__global__ void DIV(const int n, const T* in0, const T* in1, T* out, int s0, int s1){
CUDA_KERNEL_LOOP(index, n) {
out[index] = in0[index * s0] / in1[index * s1];
}
}
template <typename T>
__global__ void SQD(const int n, const T* in0, const T* in1, T* out, int s0, int s1){
CUDA_KERNEL_LOOP(index, n) {
T data = in0[index * s0] - in1[index * s1];
out[index] = data * data;
}
}
template <typename T>
__global__ void MAXIMUM(const int n, const T* in0, const T* in1, T* out, int s0, int s1);
template <>
__global__ void MAXIMUM<float>(const int n, const float* in0, const float* in1, float* out, int s0, int s1) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = max(in0[index * s0], in1[index * s1]);
}
}
template <>
__global__ void MAXIMUM<half>(const int n, const half* in0, const half* in1, half* out, int s0, int s1) {
CUDA_KERNEL_LOOP(index, n) {
float tmp = max(__half2float(in0[index * s0]) , __half2float(in1[index * s1]));
out[index] = __float2half(tmp);
}
}
template <typename T>
__global__ void MINIMUM(const int n, const T* in0, const T* in1, T* out, int s0, int s1);
template <>
__global__ void MINIMUM<float>(const int n, const float* in0, const float* in1, float* out, int s0, int s1) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = min(in0[index * s0], in1[index * s1]);
}
}
template <>
__global__ void MINIMUM<half>(const int n, const half* in0, const half* in1, half* out, int s0, int s1) {
CUDA_KERNEL_LOOP(index, n) {
float tmp = min(__half2float(in0[index * s0]) , __half2float(in1[index * s1]));
out[index] = __float2half(tmp);
}
}
template <typename T>
__global__ void POW(const int n, const T* in0, const T* in1, T* out, int s0, int s1);
template <>
__global__ void POW<float>(const int n, const float* in0, const float* in1, float* out, int s0, int s1) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = pow(in0[index * s0], in1[index * s1]);
}
}
template <>
__global__ void POW<half>(const int n, const half* in0, const half* in1, half* out, int s0, int s1) {
CUDA_KERNEL_LOOP(index, n) {
float tmp = pow(__half2float(in0[index * s0]), __half2float(in1[index * s1]));
out[index] = __float2half(tmp);
}
}
template <typename T>
hipError_t binary_template(int type, const int count, const T* bottom_data0, const T* bottom_data1, T* top_data, int s0, int s1, hipStream_t stream){
if (type == 0) {
hipLaunchKernelGGL(( ADD<T>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, bottom_data0, bottom_data1, top_data, s0, s1);
} else if (type == 1) {
hipLaunchKernelGGL(( SUB<T>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, bottom_data0, bottom_data1, top_data,
s0, s1);
} else if (type == 2) {
hipLaunchKernelGGL(( MUL<T>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, bottom_data0, bottom_data1, top_data,
s0, s1);
} else if (type == 6) {
hipLaunchKernelGGL(( POW<T>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, bottom_data0, bottom_data1, top_data,
s0, s1);
} else if (type == 3 || type == 7) {
hipLaunchKernelGGL(( DIV<T>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, bottom_data0, bottom_data1, top_data,
s0, s1);
} else if (type == 9) {
hipLaunchKernelGGL(( MAXIMUM<T>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, bottom_data0, bottom_data1,
top_data, s0, s1);
} else if (type == 8) {
hipLaunchKernelGGL(( MINIMUM<T>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, bottom_data0, bottom_data1,
top_data, s0, s1);
} else if (type == 14){
hipLaunchKernelGGL(( SQD<T>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, bottom_data0, bottom_data1, top_data, s0, s1);
} else {
printf("binary op not support:%d\n", type);
}
return hipPeekAtLastError();
}
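// Illustrative usage (not part of the original plugin): with type 0 the dispatch
// above runs the element-wise ADD kernel, and unit strides make both inputs dense:
//   binary_template<float>(/*type=*/0, count, d_in0, d_in1, d_out, /*s0=*/1, /*s1=*/1, stream);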
hipError_t BinaryPlugin::BinaryExecute(nvinfer1::DataType dataType, const int count, const void *const *inputs, void **outputs, int s0, int s1, hipStream_t stream) {
#ifdef TRT_LOG
printf("in mType:%d\n", mType);
#endif
if (dataType == nvinfer1::DataType::kFLOAT){
return binary_template<float>(mType, count, (const float*)inputs[0], (const float*)inputs[1], (float*)outputs[0], s0, s1, stream);
}else{
return binary_template<__half>(mType, count, static_cast<const __half*>(inputs[0]), static_cast<const __half*>(inputs[1]), static_cast<__half*>(outputs[0]), s0, s1, stream);
}
}
}; // namespace MNN | 6fc02fe2abe77e64d8a499d2f16337319ab811ce.cu | #include "BinaryPlugin.hpp"
namespace MNN {
template <typename T>
__global__ void ADD(const int n, const T* in0, const T* in1, T* out, int s0, int s1){
CUDA_KERNEL_LOOP(index, n) {
out[index] = in0[index * s0] + in1[index * s1];
}
}
template <typename T>
__global__ void SUB(const int n, const T* in0, const T* in1, T* out, int s0, int s1){
CUDA_KERNEL_LOOP(index, n) {
out[index] = in0[index * s0] - in1[index * s1];
}
}
template <typename T>
__global__ void MUL(const int n, const T* in0, const T* in1, T* out, int s0, int s1){
CUDA_KERNEL_LOOP(index, n) {
out[index] = in0[index * s0] * in1[index * s1];
}
}
template <typename T>
__global__ void DIV(const int n, const T* in0, const T* in1, T* out, int s0, int s1){
CUDA_KERNEL_LOOP(index, n) {
out[index] = in0[index * s0] / in1[index * s1];
}
}
template <typename T>
__global__ void SQD(const int n, const T* in0, const T* in1, T* out, int s0, int s1){
CUDA_KERNEL_LOOP(index, n) {
T data = in0[index * s0] - in1[index * s1];
out[index] = data * data;
}
}
template <typename T>
__global__ void MAXIMUM(const int n, const T* in0, const T* in1, T* out, int s0, int s1);
template <>
__global__ void MAXIMUM<float>(const int n, const float* in0, const float* in1, float* out, int s0, int s1) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = max(in0[index * s0], in1[index * s1]);
}
}
template <>
__global__ void MAXIMUM<half>(const int n, const half* in0, const half* in1, half* out, int s0, int s1) {
CUDA_KERNEL_LOOP(index, n) {
float tmp = max(__half2float(in0[index * s0]) , __half2float(in1[index * s1]));
out[index] = __float2half(tmp);
}
}
template <typename T>
__global__ void MINIMUM(const int n, const T* in0, const T* in1, T* out, int s0, int s1);
template <>
__global__ void MINIMUM<float>(const int n, const float* in0, const float* in1, float* out, int s0, int s1) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = min(in0[index * s0], in1[index * s1]);
}
}
template <>
__global__ void MINIMUM<half>(const int n, const half* in0, const half* in1, half* out, int s0, int s1) {
CUDA_KERNEL_LOOP(index, n) {
float tmp = min(__half2float(in0[index * s0]) , __half2float(in1[index * s1]));
out[index] = __float2half(tmp);
}
}
template <typename T>
__global__ void POW(const int n, const T* in0, const T* in1, T* out, int s0, int s1);
template <>
__global__ void POW<float>(const int n, const float* in0, const float* in1, float* out, int s0, int s1) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = pow(in0[index * s0], in1[index * s1]);
}
}
template <>
__global__ void POW<half>(const int n, const half* in0, const half* in1, half* out, int s0, int s1) {
CUDA_KERNEL_LOOP(index, n) {
float tmp = pow(__half2float(in0[index * s0]), __half2float(in1[index * s1]));
out[index] = __float2half(tmp);
}
}
template <typename T>
cudaError_t binary_template(int type, const int count, const T* bottom_data0, const T* bottom_data1, T* top_data, int s0, int s1, cudaStream_t stream){
if (type == 0) {
ADD<T><<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, bottom_data0, bottom_data1, top_data, s0, s1);
} else if (type == 1) {
SUB<T><<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, bottom_data0, bottom_data1, top_data,
s0, s1);
} else if (type == 2) {
MUL<T><<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, bottom_data0, bottom_data1, top_data,
s0, s1);
} else if (type == 6) {
POW<T><<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, bottom_data0, bottom_data1, top_data,
s0, s1);
} else if (type == 3 || type == 7) {
DIV<T><<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, bottom_data0, bottom_data1, top_data,
s0, s1);
} else if (type == 9) {
MAXIMUM<T><<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, bottom_data0, bottom_data1,
top_data, s0, s1);
} else if (type == 8) {
MINIMUM<T><<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, bottom_data0, bottom_data1,
top_data, s0, s1);
} else if (type == 14){
SQD<T><<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, bottom_data0, bottom_data1, top_data, s0, s1);
} else {
printf("binary op not support:%d\n", type);
}
return cudaPeekAtLastError();
}
cudaError_t BinaryPlugin::BinaryExecute(nvinfer1::DataType dataType, const int count, const void *const *inputs, void **outputs, int s0, int s1, cudaStream_t stream) {
#ifdef TRT_LOG
printf("in mType:%d\n", mType);
#endif
if (dataType == nvinfer1::DataType::kFLOAT){
return binary_template<float>(mType, count, (const float*)inputs[0], (const float*)inputs[1], (float*)outputs[0], s0, s1, stream);
}else{
return binary_template<__half>(mType, count, static_cast<const __half*>(inputs[0]), static_cast<const __half*>(inputs[1]), static_cast<__half*>(outputs[0]), s0, s1, stream);
}
}
}; // namespace MNN |
31dc86530bf307b98bf3705fab2302601d66ff58.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//--------------------------------------------------------------------------------
// NVIDIA(R) GVDB VOXELS
// Copyright 2017, NVIDIA Corporation.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the distribution.
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
// BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Version 1.0: Rama Hoetzlein, 5/1/2017
//----------------------------------------------------------------------------------
// File: cuda_gvdb_raycast.cu
//
// GVDB Raycasting
// - RayDeep - deep volume sampling
// - RaySurface - surface hit raycast
// - RayLevelSet - level set raycast
// - Raytrace - raytrace a bundle of arbitrary rays
// - Section3D - 3D cross-section render
// - Section2D - 2D cross-section render
//-----------------------------------------------
#include <stdio.h>
#include "cuda_math.cuh"
//----------------------------------------- GVDB Data Structure
#define CUDA_PATHWAY
#include "cuda_gvdb_scene.cuh" // GVDB Scene
#include "cuda_gvdb_nodes.cuh" // GVDB Node structure
#include "cuda_gvdb_geom.cuh" // GVDB Geom helpers
#include "cuda_gvdb_dda.cuh" // GVDB DDA
#include "cuda_gvdb_raycast.cuh" // GVDB Raycasting
#include "radixsort_kernel.cuh"
//-----------------------------------------
// Operator functions
#include "cuda_gvdb_operators.cuh"
// Particle functions
#include "cuda_gvdb_particles.cuh"
inline __device__ float4 performPhongShading( VDBInfo* gvdb, uchar chan, float3 shit, float3 snorm, float4 sclr, gvdbBrickFunc_t brickfunc )
{
if ( shit.z == NOHIT) // no surface hit
return SCN_BACKCLR;
// phong
float3 lightdir = normalize(scn.light_pos - shit);
float diff = 0.9 * max(0.0f, dot(snorm, lightdir) );
float amb = 0.1f;
// shadow ray
if (SCN_SHADOWAMT > 0) {
float3 hit2 = make_float3(0,0,NOHIT);
float4 hclr2 = make_float4(0,0,0,1);
float3 norm2;
rayCast ( gvdb, chan, shit + snorm * SCN_SHADOWBIAS, lightdir, hit2, norm2, hclr2, brickfunc ); // shadow ray
diff = (hit2.z==NOHIT ? diff : diff*(1.0-SCN_SHADOWAMT) );
}
return make_float4( fxyz(sclr) * (diff + amb), 1.0 );
}
// Raytracing functions
extern "C" __global__ void gvdbRayDeep ( VDBInfo* gvdb, uchar chan, uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float4 clr = make_float4(0,0,0,1);
float3 hit = make_float3(0,0,NOHIT);
float3 norm;
float3 rpos = getViewPos();
float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height));
// ray deep sampling
rayCast ( gvdb, chan, rpos, rdir, hit, norm, clr, rayDeepBrick );
clr = make_float4( lerp3(SCN_BACKCLR, clr, 1.0-clr.w), 1.0-clr.w );
outBuf [ y*scn.width + x ] = make_uchar4( clr.x*255, clr.y*255, clr.z*255, clr.w*255 );
}
// Render the volume data by raycasting
extern "C" __global__ void gvdbRaySurfaceVoxel ( VDBInfo* gvdb, uchar chan, uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float3 hit = make_float3(NOHIT,NOHIT,NOHIT);
float3 norm;
float3 rpos = getViewPos();
float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height));
float4 clr = make_float4(1,1,1,1);
// ray surface hit
rayCast ( gvdb, chan, rpos, rdir, hit, norm, clr, raySurfaceVoxelBrick );
clr = performPhongShading ( gvdb, chan, hit, norm, clr, raySurfaceVoxelBrick );
outBuf[y*scn.width + x] = make_uchar4(clr.x*255, clr.y*255, clr.z*255, clr.w*255);
}
// Render the volume data by raycasting
extern "C" __global__ void gvdbRaySurfaceTrilinear ( VDBInfo* gvdb, uchar chan, uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float3 hit = make_float3(NOHIT,NOHIT,NOHIT);
float3 norm;
float3 rpos = getViewPos();
float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height));
float4 clr = make_float4(1,1,1,1);
// ray surface hit
rayCast ( gvdb, chan, rpos, rdir, hit, norm, clr, raySurfaceTrilinearBrick );
clr = performPhongShading ( gvdb, chan, hit, norm, clr, raySurfaceTrilinearBrick );
outBuf[y*scn.width + x] = make_uchar4(clr.x*255, clr.y*255, clr.z*255, clr.w*255);
}
// Render the volume data by raycasting
extern "C" __global__ void gvdbRaySurfaceTricubic ( VDBInfo* gvdb, uchar chan, uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float3 hit = make_float3(NOHIT,NOHIT,NOHIT);
float3 norm;
float3 rpos = getViewPos();
float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height));
float4 clr = make_float4(1,1,1,1);
// ray surface hit
rayCast ( gvdb, chan, rpos, rdir, hit, norm, clr, raySurfaceTricubicBrick );
clr = performPhongShading ( gvdb, chan, hit, norm, clr, raySurfaceTrilinearBrick );
outBuf[y*scn.width + x] = make_uchar4(clr.x*255, clr.y*255, clr.z*255, clr.w*255);
}
// Render the volume data by raycasting
extern "C" __global__ void gvdbRaySurfaceDepth ( VDBInfo* gvdb, uchar chan, uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float3 hit = make_float3(NOHIT,NOHIT,NOHIT);
float3 norm;
float3 rpos = getViewPos();
float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height));
float4 clr = make_float4(1,1,1,1);
// ray surface hit
// *NOTE*: raySurfaceDepthBrick not yet implemented
rayCast (gvdb, chan, rpos, rdir, hit, norm, clr, raySurfaceTrilinearBrick );
clr = performPhongShading ( gvdb, chan, hit, norm, clr, raySurfaceTrilinearBrick );
outBuf[y*scn.width + x] = make_uchar4(clr.x*255, clr.y*255, clr.z*255, clr.w*255 );
}
// Render the volume data by raycasting
extern "C" __global__ void gvdbRayLevelSet ( VDBInfo* gvdb, uchar chan, uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float3 hit = make_float3(0,0,NOHIT);
float4 clr = make_float4(1,1,1,1);
float3 norm;
float3 rpos = getViewPos();
float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height));
// Raycast Level Set
rayCast ( gvdb, chan, rpos, rdir, hit, norm, clr, rayLevelSetBrick );
clr = performPhongShading ( gvdb, chan, hit, norm, clr, rayLevelSetBrick );
outBuf [ y*scn.width + x ] = make_uchar4( clr.x*255, clr.y*255, clr.z*255, clr.w*255 );
}
// Render the volume data by raycasting
extern "C" __global__ void gvdbRayEmptySkip ( VDBInfo* gvdb, uchar chan, uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float4 clr = make_float4(1,1,1,1);
float3 hit = make_float3(NOHIT,NOHIT,NOHIT);
float3 norm;
float3 rpos = getViewPos();
float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height));
// Empty skipping
rayCast ( gvdb, chan, rpos, rdir, hit, norm, clr, rayEmptySkipBrick );
if ( hit.z != NOHIT) {
clr = make_float4( hit * 0.01, 1 );
} else {
clr = SCN_BACKCLR;
}
outBuf [ y*scn.width + x ] = make_uchar4( clr.x*255, clr.y*255, clr.z*255, 255 );
}
// Raytrace a bundle of rays
extern "C" __global__ void gvdbRaytrace ( VDBInfo* gvdb, uchar chan, int num_rays, ScnRay* rays, float bias )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
if ( x >= num_rays ) return;
// raytrace
rays[x].hit = make_float3(NOHIT, NOHIT, NOHIT);
float4 hclr = make_float4(1,1,1,1);
rayCast ( gvdb, chan, rays[x].orig, rays[x].dir, rays[x].hit, rays[x].normal, hclr, raySurfaceTrilinearBrick );
if ( rays[x].hit.z != NOHIT ) rays[x].hit -= rays[x].dir * bias;
}
// Render a cross section of the volume data in 3D
extern "C" __global__ void gvdbSection3D ( VDBInfo* gvdb, uchar chan, uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float4 clr = make_float4(1,1,1,0);
float3 norm;
float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height));
// raytrace with cross-section plane
float3 wpos = getViewPos();
float t = rayPlaneIntersect ( wpos, rdir, SCN_SLICE_NORM, SCN_SLICE_PNT ); // hit section plane
if ( t > 0 ) { // yes..
wpos += t*rdir; // get point of surface
float3 offs, vmin; uint64 nid;
VDBNode* node = getNodeAtPoint ( gvdb, wpos, &offs, &vmin, &nid ); // find vdb node at point
if ( node != 0x0 ) {
//---- debugging: show apron
// float3 p = offs + (wpos-vmin)*(34.0/16.0) - make_float3(gvdb.atlas_apron);
// clr = transfer ( tex3D ( volTexIn, p.x, p.y, p.z ) );
t = getTrilinear ( gvdb, chan, wpos, offs, vmin ); // t <= voxel value
clr = transfer ( gvdb, t ); // clr at point on surface
if ( gvdb->clr_chan != CHAN_UNDEF ) {
float3 p = offs + (wpos - vmin);
clr *= make_float4( make_float3( getColor(gvdb, gvdb->clr_chan, p) ), 1.0 );
}
} else {
t = 0; // set t=0, no voxel value found
}
}
// 3D surface raytrace
float3 hit = make_float3(NOHIT,NOHIT,NOHIT);
float4 hclr = make_float4(1,1,1,1);
// using previous wpos (set above) to start ray, trace beyond section plane to get 3D surface hit
rayCast ( gvdb, chan, wpos, rdir, hit, norm, hclr, raySurfaceTrilinearBrick );
if ( hit.z != NOHIT) { // 3D surface hit..
float3 lightdir = normalize ( scn.light_pos - hit );
float ds = (t > SCN_THRESH) ? 1 : 0.8*max(0.0f, dot( norm, lightdir )); // if voxel value on section plane is inside surface, no diffuse shading
clr = lerp4( hclr * ds, clr, clr.w ); // blend 3D surface with cross-section clr
} else {
clr = lerp4( SCN_BACKCLR, clr, clr.w ); // no 3D hit. blend background with cross-section
}
outBuf [ y*scn.width + x ] = make_uchar4( clr.x*255, clr.y*255, clr.z*255, 255 );
}
// Render a section of the volume data in 2D
extern "C" __global__ void gvdbSection2D ( VDBInfo* gvdb, uchar chan, uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float4 clr = make_float4(1,1,1,0);
float3 bgclr = make_float3 ( 0, 0, 0 );
float3 wpos;
float3 spnt = make_float3( float(x)*2.0/scn.width - 1.0, 0, float(y)*2.0/scn.height - 1.0);
wpos = SCN_SLICE_PNT + spnt * SCN_SLICE_NORM;
// get leaf node at hit point
float3 offs, vmin;
uint64 nid;
VDBNode* node = getNodeAtPoint ( gvdb, wpos, &offs, &vmin, &nid );
if ( node == 0x0 ) { outBuf [ y*scn.width + x ] = make_uchar4(bgclr.x*255, bgclr.y*255, bgclr.z*255, 255); return; }
// get trilinear data value
clr = transfer ( gvdb, getTrilinear ( gvdb, chan, wpos, offs, vmin ) );
bgclr = lerp3 ( bgclr, make_float3(clr.x,clr.y,clr.z), clr.w );
outBuf [ y*scn.width + x ] = make_uchar4( bgclr.x*255, bgclr.y*255, bgclr.z*255, 255 );
} | 31dc86530bf307b98bf3705fab2302601d66ff58.cu |
//--------------------------------------------------------------------------------
// NVIDIA(R) GVDB VOXELS
// Copyright 2017, NVIDIA Corporation.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the distribution.
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
// BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Version 1.0: Rama Hoetzlein, 5/1/2017
//----------------------------------------------------------------------------------
// File: cuda_gvdb_raycast.cu
//
// GVDB Raycasting
// - RayDeep - deep volume sampling
// - RaySurface - surface hit raycast
// - RayLevelSet - level set raycast
// - Raytrace - raytrace a bundle of arbitrary rays
// - Section3D - 3D cross-section render
// - Section2D - 2D cross-section render
//-----------------------------------------------
#include <stdio.h>
#include "cuda_math.cuh"
//----------------------------------------- GVDB Data Structure
#define CUDA_PATHWAY
#include "cuda_gvdb_scene.cuh" // GVDB Scene
#include "cuda_gvdb_nodes.cuh" // GVDB Node structure
#include "cuda_gvdb_geom.cuh" // GVDB Geom helpers
#include "cuda_gvdb_dda.cuh" // GVDB DDA
#include "cuda_gvdb_raycast.cuh" // GVDB Raycasting
#include "radixsort_kernel.cuh"
//-----------------------------------------
// Operator functions
#include "cuda_gvdb_operators.cuh"
// Particle functions
#include "cuda_gvdb_particles.cuh"
inline __device__ float4 performPhongShading( VDBInfo* gvdb, uchar chan, float3 shit, float3 snorm, float4 sclr, gvdbBrickFunc_t brickfunc )
{
if ( shit.z == NOHIT) // no surface hit
return SCN_BACKCLR;
// phong
float3 lightdir = normalize(scn.light_pos - shit);
float diff = 0.9 * max(0.0f, dot(snorm, lightdir) );
float amb = 0.1f;
// shadow ray
if (SCN_SHADOWAMT > 0) {
float3 hit2 = make_float3(0,0,NOHIT);
float4 hclr2 = make_float4(0,0,0,1);
float3 norm2;
rayCast ( gvdb, chan, shit + snorm * SCN_SHADOWBIAS, lightdir, hit2, norm2, hclr2, brickfunc ); // shadow ray
diff = (hit2.z==NOHIT ? diff : diff*(1.0-SCN_SHADOWAMT) );
}
return make_float4( fxyz(sclr) * (diff + amb), 1.0 );
}
// Raytracing functions
extern "C" __global__ void gvdbRayDeep ( VDBInfo* gvdb, uchar chan, uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float4 clr = make_float4(0,0,0,1);
float3 hit = make_float3(0,0,NOHIT);
float3 norm;
float3 rpos = getViewPos();
float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height));
// ray deep sampling
rayCast ( gvdb, chan, rpos, rdir, hit, norm, clr, rayDeepBrick );
clr = make_float4( lerp3(SCN_BACKCLR, clr, 1.0-clr.w), 1.0-clr.w );
outBuf [ y*scn.width + x ] = make_uchar4( clr.x*255, clr.y*255, clr.z*255, clr.w*255 );
}
// Render the volume data by raycasting
extern "C" __global__ void gvdbRaySurfaceVoxel ( VDBInfo* gvdb, uchar chan, uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float3 hit = make_float3(NOHIT,NOHIT,NOHIT);
float3 norm;
float3 rpos = getViewPos();
float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height));
float4 clr = make_float4(1,1,1,1);
// ray surface hit
rayCast ( gvdb, chan, rpos, rdir, hit, norm, clr, raySurfaceVoxelBrick );
clr = performPhongShading ( gvdb, chan, hit, norm, clr, raySurfaceVoxelBrick );
outBuf[y*scn.width + x] = make_uchar4(clr.x*255, clr.y*255, clr.z*255, clr.w*255);
}
// Render the volume data by raycasting
extern "C" __global__ void gvdbRaySurfaceTrilinear ( VDBInfo* gvdb, uchar chan, uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float3 hit = make_float3(NOHIT,NOHIT,NOHIT);
float3 norm;
float3 rpos = getViewPos();
float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height));
float4 clr = make_float4(1,1,1,1);
// ray surface hit
rayCast ( gvdb, chan, rpos, rdir, hit, norm, clr, raySurfaceTrilinearBrick );
clr = performPhongShading ( gvdb, chan, hit, norm, clr, raySurfaceTrilinearBrick );
outBuf[y*scn.width + x] = make_uchar4(clr.x*255, clr.y*255, clr.z*255, clr.w*255);
}
// Render the volume data by raycasting
extern "C" __global__ void gvdbRaySurfaceTricubic ( VDBInfo* gvdb, uchar chan, uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float3 hit = make_float3(NOHIT,NOHIT,NOHIT);
float3 norm;
float3 rpos = getViewPos();
float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height));
float4 clr = make_float4(1,1,1,1);
// ray surface hit
rayCast ( gvdb, chan, rpos, rdir, hit, norm, clr, raySurfaceTricubicBrick );
clr = performPhongShading ( gvdb, chan, hit, norm, clr, raySurfaceTrilinearBrick );
outBuf[y*scn.width + x] = make_uchar4(clr.x*255, clr.y*255, clr.z*255, clr.w*255);
}
// Render the volume data by raycasting
extern "C" __global__ void gvdbRaySurfaceDepth ( VDBInfo* gvdb, uchar chan, uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float3 hit = make_float3(NOHIT,NOHIT,NOHIT);
float3 norm;
float3 rpos = getViewPos();
float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height));
float4 clr = make_float4(1,1,1,1);
// ray surface hit
// *NOTE*: raySurfaceDepthBrick not yet implemented
rayCast (gvdb, chan, rpos, rdir, hit, norm, clr, raySurfaceTrilinearBrick );
clr = performPhongShading ( gvdb, chan, hit, norm, clr, raySurfaceTrilinearBrick );
outBuf[y*scn.width + x] = make_uchar4(clr.x*255, clr.y*255, clr.z*255, clr.w*255 );
}
// Render the volume data by raycasting
extern "C" __global__ void gvdbRayLevelSet ( VDBInfo* gvdb, uchar chan, uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float3 hit = make_float3(0,0,NOHIT);
float4 clr = make_float4(1,1,1,1);
float3 norm;
float3 rpos = getViewPos();
float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height));
// Raycast Level Set
rayCast ( gvdb, chan, rpos, rdir, hit, norm, clr, rayLevelSetBrick );
clr = performPhongShading ( gvdb, chan, hit, norm, clr, rayLevelSetBrick );
outBuf [ y*scn.width + x ] = make_uchar4( clr.x*255, clr.y*255, clr.z*255, clr.w*255 );
}
// Render the volume data by raycasting
extern "C" __global__ void gvdbRayEmptySkip ( VDBInfo* gvdb, uchar chan, uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float4 clr = make_float4(1,1,1,1);
float3 hit = make_float3(NOHIT,NOHIT,NOHIT);
float3 norm;
float3 rpos = getViewPos();
float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height));
// Empty skipping
rayCast ( gvdb, chan, rpos, rdir, hit, norm, clr, rayEmptySkipBrick );
if ( hit.z != NOHIT) {
clr = make_float4( hit * 0.01, 1 );
} else {
clr = SCN_BACKCLR;
}
outBuf [ y*scn.width + x ] = make_uchar4( clr.x*255, clr.y*255, clr.z*255, 255 );
}
// Raytrace a bundle of rays
extern "C" __global__ void gvdbRaytrace ( VDBInfo* gvdb, uchar chan, int num_rays, ScnRay* rays, float bias )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
if ( x >= num_rays ) return;
// raytrace
rays[x].hit = make_float3(NOHIT, NOHIT, NOHIT);
float4 hclr = make_float4(1,1,1,1);
rayCast ( gvdb, chan, rays[x].orig, rays[x].dir, rays[x].hit, rays[x].normal, hclr, raySurfaceTrilinearBrick );
if ( rays[x].hit.z != NOHIT ) rays[x].hit -= rays[x].dir * bias;
}
// Render a cross section of the volume data in 3D
extern "C" __global__ void gvdbSection3D ( VDBInfo* gvdb, uchar chan, uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float4 clr = make_float4(1,1,1,0);
float3 norm;
float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height));
// raytrace with cross-section plane
float3 wpos = getViewPos();
float t = rayPlaneIntersect ( wpos, rdir, SCN_SLICE_NORM, SCN_SLICE_PNT ); // hit section plane
if ( t > 0 ) { // yes..
wpos += t*rdir; // get point of surface
float3 offs, vmin; uint64 nid;
VDBNode* node = getNodeAtPoint ( gvdb, wpos, &offs, &vmin, &nid ); // find vdb node at point
if ( node != 0x0 ) {
//---- debugging: show apron
// float3 p = offs + (wpos-vmin)*(34.0/16.0) - make_float3(gvdb.atlas_apron);
// clr = transfer ( tex3D ( volTexIn, p.x, p.y, p.z ) );
t = getTrilinear ( gvdb, chan, wpos, offs, vmin ); // t <= voxel value
clr = transfer ( gvdb, t ); // clr at point on surface
if ( gvdb->clr_chan != CHAN_UNDEF ) {
float3 p = offs + (wpos - vmin);
clr *= make_float4( make_float3( getColor(gvdb, gvdb->clr_chan, p) ), 1.0 );
}
} else {
t = 0; // set t=0, no voxel value found
}
}
// 3D surface raytrace
float3 hit = make_float3(NOHIT,NOHIT,NOHIT);
float4 hclr = make_float4(1,1,1,1);
// using previous wpos (set above) to start ray, trace beyond section plane to get 3D surface hit
rayCast ( gvdb, chan, wpos, rdir, hit, norm, hclr, raySurfaceTrilinearBrick );
if ( hit.z != NOHIT) { // 3D surface hit..
float3 lightdir = normalize ( scn.light_pos - hit );
float ds = (t > SCN_THRESH) ? 1 : 0.8*max(0.0f, dot( norm, lightdir )); // if voxel value on section plane is inside surface, no diffuse shading
clr = lerp4( hclr * ds, clr, clr.w ); // blend 3D surface with cross-section clr
} else {
clr = lerp4( SCN_BACKCLR, clr, clr.w ); // no 3D hit. blend background with cross-section
}
outBuf [ y*scn.width + x ] = make_uchar4( clr.x*255, clr.y*255, clr.z*255, 255 );
}
// Render a section of the volume data in 2D
extern "C" __global__ void gvdbSection2D ( VDBInfo* gvdb, uchar chan, uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float4 clr = make_float4(1,1,1,0);
float3 bgclr = make_float3 ( 0, 0, 0 );
float3 wpos;
float3 spnt = make_float3( float(x)*2.0/scn.width - 1.0, 0, float(y)*2.0/scn.height - 1.0);
wpos = SCN_SLICE_PNT + spnt * SCN_SLICE_NORM;
// get leaf node at hit point
float3 offs, vmin;
uint64 nid;
VDBNode* node = getNodeAtPoint ( gvdb, wpos, &offs, &vmin, &nid );
if ( node == 0x0 ) { outBuf [ y*scn.width + x ] = make_uchar4(bgclr.x*255, bgclr.y*255, bgclr.z*255, 255); return; }
// get trilinear data value
clr = transfer ( gvdb, getTrilinear ( gvdb, chan, wpos, offs, vmin ) );
bgclr = lerp3 ( bgclr, make_float3(clr.x,clr.y,clr.z), clr.w );
outBuf [ y*scn.width + x ] = make_uchar4( bgclr.x*255, bgclr.y*255, bgclr.z*255, 255 );
} |
ef5cff4850091e06e6658e71af90e343a0e4d53e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
uchar4 rgba = rgbaImage[numCols * blockIdx.x + blockIdx.y];
greyImage[numCols * blockIdx.x + blockIdx.y] = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
}
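// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original homework file): one way to
// complete the TODO above is to give every pixel its own thread inside 2D
// blocks. The kernel name and the 16x16 block size mentioned below are
// assumptions made only for this sketch.
__global__
void rgba_to_greyscale_sketch(const uchar4* const rgbaImage,
                              unsigned char* const greyImage,
                              int numRows, int numCols)
{
  const int row = blockIdx.y * blockDim.y + threadIdx.y;   // image row
  const int col = blockIdx.x * blockDim.x + threadIdx.x;   // image column
  if (row >= numRows || col >= numCols) return;            // guard partial blocks
  const int idx = row * numCols + col;                     // 1D offset into the image
  const uchar4 rgba = rgbaImage[idx];
  greyImage[idx] = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
}
// A matching launch would use blockSize(16, 16, 1) and
// gridSize((numCols+15)/16, (numRows+15)/16, 1).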
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently each pixel is handled by its own single-thread block (blockSize below is 1x1x1)
const dim3 blockSize(1, 1, 1); //TODO
const dim3 gridSize( numRows, numCols, 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| ef5cff4850091e06e6658e71af90e343a0e4d53e.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
uchar4 rgba = rgbaImage[numCols * blockIdx.x + blockIdx.y];
greyImage[numCols * blockIdx.x + blockIdx.y] = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently each pixel is handled by its own single-thread block (blockSize below is 1x1x1)
const dim3 blockSize(1, 1, 1); //TODO
const dim3 gridSize( numRows, numCols, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
15c006f61f89e00a547178f7989b4797d6241071.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/replace.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/replace.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <strings/utilities.cuh>
#include <strings/utilities.hpp>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Used as template parameter to divide size calculation from
* the actual string operation within a function.
*
* Useful when most of the logic is identical for both passes.
*/
enum class two_pass {
SIZE_ONLY = 0, ///< calculate the size only
EXECUTE_OP ///< run the string operation
};
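// In this file the two passes are wired up as follows: the SIZE_ONLY
// instantiation is fed to a thrust transform iterator so that
// make_offsets_child_column can prefix-sum the per-row byte counts into the
// offsets child column, and the EXECUTE_OP instantiation is then run with
// thrust::for_each_n to write the characters into the chars child column at
// those offsets.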
/**
* @brief Function logic for the replace API.
*
* This will perform a replace operation on each string.
*/
template <two_pass Pass = two_pass::SIZE_ONLY>
struct replace_fn {
column_device_view const d_strings;
string_view const d_target;
string_view const d_repl;
int32_t max_repl;
const int32_t* d_offsets{};
char* d_chars{};
__device__ size_type operator()(size_type idx)
{
if (d_strings.is_null(idx)) return 0; // null string
string_view d_str = d_strings.element<string_view>(idx);
auto max_n = max_repl;
if (max_n < 0) max_n = d_str.length(); // max possible replacements
char* out_ptr = nullptr;
if (Pass == two_pass::EXECUTE_OP) out_ptr = d_chars + d_offsets[idx];
const char* in_ptr = d_str.data();
size_type bytes = d_str.size_bytes();
auto position = d_str.find(d_target);
size_type last_pos = 0;
while ((position >= 0) && (max_n > 0)) {
if (Pass == two_pass::SIZE_ONLY)
bytes += d_repl.size_bytes() - d_target.size_bytes();
else // EXECUTE_OP
{
size_type curr_pos = d_str.byte_offset(position);
out_ptr = copy_and_increment(out_ptr, in_ptr + last_pos, curr_pos - last_pos); // copy left
out_ptr = copy_string(out_ptr, d_repl); // copy repl
last_pos = curr_pos + d_target.size_bytes();
}
position = d_str.find(d_target, position + d_target.size_bytes());
--max_n;
}
if (Pass == two_pass::EXECUTE_OP) // copy what's left (or right, depending on your point of view)
memcpy(out_ptr, in_ptr + last_pos, d_str.size_bytes() - last_pos);
return bytes;
}
};
} // namespace
//
std::unique_ptr<column> replace(strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_empty_strings_column(mr, stream);
CUDF_EXPECTS(repl.is_valid(), "Parameter repl must be valid.");
CUDF_EXPECTS(target.is_valid(), "Parameter target must be valid.");
CUDF_EXPECTS(target.size() > 0, "Parameter target must not be empty string.");
string_view d_target(target.data(), target.size());
string_view d_repl(repl.data(), repl.size());
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// copy the null mask
rmm::device_buffer null_mask = copy_bitmask(strings.parent(), stream, mr);
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<int32_t>(0),
replace_fn<two_pass::SIZE_ONLY>{d_strings, d_target, d_repl, maxrepl});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column =
create_chars_child_column(strings_count, strings.null_count(), bytes, mr, stream);
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(
rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
replace_fn<two_pass::EXECUTE_OP>{d_strings, d_target, d_repl, maxrepl, d_offsets, d_chars});
//
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
strings.null_count(),
std::move(null_mask),
stream,
mr);
}
namespace {
/**
* @brief Function logic for the replace_slice API.
*
* This will perform a replace_slice operation on each string.
*/
template <two_pass Pass = two_pass::SIZE_ONLY>
struct replace_slice_fn {
column_device_view const d_strings;
string_view const d_repl;
size_type start, stop;
const int32_t* d_offsets{};
char* d_chars{};
__device__ size_type operator()(size_type idx)
{
if (d_strings.is_null(idx)) return 0; // null string
string_view d_str = d_strings.element<string_view>(idx);
auto length = d_str.length();
char* out_ptr = nullptr;
if (Pass == two_pass::EXECUTE_OP) out_ptr = d_chars + d_offsets[idx];
const char* in_ptr = d_str.data();
size_type bytes = d_str.size_bytes();
size_type begin = ((start < 0) || (start > length) ? length : start);
size_type end = ((stop < 0) || (stop > length) ? length : stop);
begin = d_str.byte_offset(begin);
end = d_str.byte_offset(end);
bytes += d_repl.size_bytes() - (end - begin);
if (Pass == two_pass::EXECUTE_OP) {
out_ptr = copy_and_increment(out_ptr, in_ptr, begin);
out_ptr = copy_string(out_ptr, d_repl);
out_ptr = copy_and_increment(out_ptr, in_ptr + end, d_str.size_bytes() - end);
}
return bytes;
}
};
} // namespace
std::unique_ptr<column> replace_slice(strings_column_view const& strings,
string_scalar const& repl,
size_type start,
size_type stop,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_empty_strings_column(mr, stream);
CUDF_EXPECTS(repl.is_valid(), "Parameter repl must be valid.");
if (stop > 0) CUDF_EXPECTS(start <= stop, "Parameter start must be less than or equal to stop.");
string_view d_repl(repl.data(), repl.size());
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// copy the null mask
rmm::device_buffer null_mask = copy_bitmask(strings.parent(), stream, mr);
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<int32_t>(0),
replace_slice_fn<two_pass::SIZE_ONLY>{d_strings, d_repl, start, stop});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream);
auto offsets_view = offsets_column->view();
auto d_offsets = offsets_view.data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column =
create_chars_child_column(strings_count, strings.null_count(), bytes, mr, stream);
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
thrust::for_each_n(
rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
replace_slice_fn<two_pass::EXECUTE_OP>{d_strings, d_repl, start, stop, d_offsets, d_chars});
//
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
strings.null_count(),
std::move(null_mask),
stream,
mr);
}
namespace {
/**
* @brief Function logic for the replace_multi API.
*
* This will perform the multi-replace operation on each string.
*/
template <two_pass Pass = two_pass::SIZE_ONLY>
struct replace_multi_fn {
column_device_view const d_strings;
column_device_view const d_targets;
column_device_view const d_repls;
const int32_t* d_offsets{};
char* d_chars{};
__device__ size_type operator()(size_type idx)
{
if (d_strings.is_null(idx)) return 0;
string_view d_str = d_strings.element<string_view>(idx);
char* out_ptr = nullptr;
if (Pass == two_pass::EXECUTE_OP) out_ptr = d_chars + d_offsets[idx];
const char* in_ptr = d_str.data();
size_type size = d_str.size_bytes();
size_type bytes = size, spos = 0, lpos = 0;
while (spos < size) { // check each character against each target
for (int tgt_idx = 0; tgt_idx < d_targets.size(); ++tgt_idx) {
string_view d_tgt = d_targets.element<string_view>(tgt_idx);
if ((d_tgt.size_bytes() <= (size - spos)) && // check fit
(d_tgt.compare(in_ptr + spos, d_tgt.size_bytes()) == 0)) // does it match
{ // found one
string_view d_repl;
if (d_repls.size() == 1)
d_repl = d_repls.element<string_view>(0);
else
d_repl = d_repls.element<string_view>(tgt_idx);
if (Pass == two_pass::SIZE_ONLY)
bytes += d_repl.size_bytes() - d_tgt.size_bytes();
else {
out_ptr = copy_and_increment(out_ptr, in_ptr + lpos, spos - lpos);
out_ptr = copy_string(out_ptr, d_repl);
lpos = spos + d_tgt.size_bytes();
}
spos += d_tgt.size_bytes() - 1;
break;
}
}
++spos;
}
if (Pass == two_pass::EXECUTE_OP) // copy remainder
memcpy(out_ptr, in_ptr + lpos, size - lpos);
return bytes;
}
};
} // namespace
std::unique_ptr<column> replace(strings_column_view const& strings,
strings_column_view const& targets,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto strings_count = strings.size();
if (strings_count == 0) return make_empty_strings_column(mr, stream);
CUDF_EXPECTS(((targets.size() > 0) && (targets.null_count() == 0)),
"Parameters targets must not be empty and must not have nulls");
CUDF_EXPECTS(((repls.size() > 0) && (repls.null_count() == 0)),
"Parameters repls must not be empty and must not have nulls");
if (repls.size() > 1)
CUDF_EXPECTS(repls.size() == targets.size(), "Sizes for targets and repls must match");
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
auto targets_column = column_device_view::create(targets.parent(), stream);
auto d_targets = *targets_column;
auto repls_column = column_device_view::create(repls.parent(), stream);
auto d_repls = *repls_column;
// copy the null mask
rmm::device_buffer null_mask = copy_bitmask(strings.parent(), stream, mr);
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<int32_t>(0),
replace_multi_fn<two_pass::SIZE_ONLY>{d_strings, d_targets, d_repls});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column =
create_chars_child_column(strings_count, strings.null_count(), bytes, mr, stream);
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(
rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
replace_multi_fn<two_pass::EXECUTE_OP>{d_strings, d_targets, d_repls, d_offsets, d_chars});
//
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
strings.null_count(),
std::move(null_mask),
stream,
mr);
}
std::unique_ptr<column> replace_nulls(strings_column_view const& strings,
string_scalar const& repl,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_empty_strings_column(mr, stream);
CUDF_EXPECTS(repl.is_valid(), "Parameter repl must be valid.");
string_view d_repl(repl.data(), repl.size());
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<int32_t>(0), [d_strings, d_repl] __device__(size_type idx) {
return d_strings.is_null(idx) ? d_repl.size_bytes()
: d_strings.element<string_view>(idx).size_bytes();
});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = strings::detail::create_chars_child_column(
strings_count, strings.null_count(), bytes, mr, stream);
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_strings, d_repl, d_offsets, d_chars] __device__(size_type idx) {
string_view d_str = d_repl;
if (!d_strings.is_null(idx)) d_str = d_strings.element<string_view>(idx);
memcpy(d_chars + d_offsets[idx], d_str.data(), d_str.size_bytes());
});
//
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
0,
rmm::device_buffer{0, stream, mr},
stream,
mr);
}
} // namespace detail
// external API
std::unique_ptr<column> replace(strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace(strings, target, repl, maxrepl, mr);
}
std::unique_ptr<column> replace_slice(strings_column_view const& strings,
string_scalar const& repl,
size_type start,
size_type stop,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace_slice(strings, repl, start, stop, mr);
}
std::unique_ptr<column> replace(strings_column_view const& strings,
strings_column_view const& targets,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace(strings, targets, repls, mr);
}
std::unique_ptr<column> replace_nulls(strings_column_view const& strings,
string_scalar const& repl,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace_nulls(strings, repl, mr);
}
} // namespace strings
} // namespace cudf
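// Minimal usage sketch of the external replace API above (illustrative only;
// the column variable and the literal strings are assumptions, and any
// defaulted arguments live in the public header rather than in this file):
//
//   cudf::strings_column_view sv(input_column);
//   auto out = cudf::strings::replace(sv,
//                                     cudf::string_scalar("old"),
//                                     cudf::string_scalar("new"),
//                                     -1);  // negative maxrepl => replace every occurrence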
| 15c006f61f89e00a547178f7989b4797d6241071.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/replace.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/replace.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <strings/utilities.cuh>
#include <strings/utilities.hpp>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Used as template parameter to divide size calculation from
* the actual string operation within a function.
*
* Useful when most of the logic is identical for both passes.
*/
enum class two_pass {
SIZE_ONLY = 0, ///< calculate the size only
EXECUTE_OP ///< run the string operation
};
/**
* @brief Function logic for the replace API.
*
* This will perform a replace operation on each string.
*/
template <two_pass Pass = two_pass::SIZE_ONLY>
struct replace_fn {
column_device_view const d_strings;
string_view const d_target;
string_view const d_repl;
int32_t max_repl;
const int32_t* d_offsets{};
char* d_chars{};
__device__ size_type operator()(size_type idx)
{
if (d_strings.is_null(idx)) return 0; // null string
string_view d_str = d_strings.element<string_view>(idx);
auto max_n = max_repl;
if (max_n < 0) max_n = d_str.length(); // max possible replacements
char* out_ptr = nullptr;
if (Pass == two_pass::EXECUTE_OP) out_ptr = d_chars + d_offsets[idx];
const char* in_ptr = d_str.data();
size_type bytes = d_str.size_bytes();
auto position = d_str.find(d_target);
size_type last_pos = 0;
while ((position >= 0) && (max_n > 0)) {
if (Pass == two_pass::SIZE_ONLY)
bytes += d_repl.size_bytes() - d_target.size_bytes();
else // EXECUTE_OP
{
size_type curr_pos = d_str.byte_offset(position);
out_ptr = copy_and_increment(out_ptr, in_ptr + last_pos, curr_pos - last_pos); // copy left
out_ptr = copy_string(out_ptr, d_repl); // copy repl
last_pos = curr_pos + d_target.size_bytes();
}
position = d_str.find(d_target, position + d_target.size_bytes());
--max_n;
}
if (Pass == two_pass::EXECUTE_OP) // copy whats left (or right depending on your point of view)
memcpy(out_ptr, in_ptr + last_pos, d_str.size_bytes() - last_pos);
return bytes;
}
};
} // namespace
//
std::unique_ptr<column> replace(strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_empty_strings_column(mr, stream);
CUDF_EXPECTS(repl.is_valid(), "Parameter repl must be valid.");
CUDF_EXPECTS(target.is_valid(), "Parameter target must be valid.");
CUDF_EXPECTS(target.size() > 0, "Parameter target must not be empty string.");
string_view d_target(target.data(), target.size());
string_view d_repl(repl.data(), repl.size());
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// copy the null mask
rmm::device_buffer null_mask = copy_bitmask(strings.parent(), stream, mr);
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<int32_t>(0),
replace_fn<two_pass::SIZE_ONLY>{d_strings, d_target, d_repl, maxrepl});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column =
create_chars_child_column(strings_count, strings.null_count(), bytes, mr, stream);
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(
rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
replace_fn<two_pass::EXECUTE_OP>{d_strings, d_target, d_repl, maxrepl, d_offsets, d_chars});
//
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
strings.null_count(),
std::move(null_mask),
stream,
mr);
}
namespace {
/**
* @brief Function logic for the replace_slice API.
*
* This will perform a replace_slice operation on each string.
*/
template <two_pass Pass = two_pass::SIZE_ONLY>
struct replace_slice_fn {
column_device_view const d_strings;
string_view const d_repl;
size_type start, stop;
const int32_t* d_offsets{};
char* d_chars{};
__device__ size_type operator()(size_type idx)
{
if (d_strings.is_null(idx)) return 0; // null string
string_view d_str = d_strings.element<string_view>(idx);
auto length = d_str.length();
char* out_ptr = nullptr;
if (Pass == two_pass::EXECUTE_OP) out_ptr = d_chars + d_offsets[idx];
const char* in_ptr = d_str.data();
size_type bytes = d_str.size_bytes();
size_type begin = ((start < 0) || (start > length) ? length : start);
size_type end = ((stop < 0) || (stop > length) ? length : stop);
begin = d_str.byte_offset(begin);
end = d_str.byte_offset(end);
bytes += d_repl.size_bytes() - (end - begin);
if (Pass == two_pass::EXECUTE_OP) {
out_ptr = copy_and_increment(out_ptr, in_ptr, begin);
out_ptr = copy_string(out_ptr, d_repl);
out_ptr = copy_and_increment(out_ptr, in_ptr + end, d_str.size_bytes() - end);
}
return bytes;
}
};
} // namespace
std::unique_ptr<column> replace_slice(strings_column_view const& strings,
string_scalar const& repl,
size_type start,
size_type stop,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_empty_strings_column(mr, stream);
CUDF_EXPECTS(repl.is_valid(), "Parameter repl must be valid.");
if (stop > 0) CUDF_EXPECTS(start <= stop, "Parameter start must be less than or equal to stop.");
string_view d_repl(repl.data(), repl.size());
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// copy the null mask
rmm::device_buffer null_mask = copy_bitmask(strings.parent(), stream, mr);
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<int32_t>(0),
replace_slice_fn<two_pass::SIZE_ONLY>{d_strings, d_repl, start, stop});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream);
auto offsets_view = offsets_column->view();
auto d_offsets = offsets_view.data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column =
create_chars_child_column(strings_count, strings.null_count(), bytes, mr, stream);
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
thrust::for_each_n(
rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
replace_slice_fn<two_pass::EXECUTE_OP>{d_strings, d_repl, start, stop, d_offsets, d_chars});
//
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
strings.null_count(),
std::move(null_mask),
stream,
mr);
}
namespace {
/**
* @brief Function logic for the replace_multi API.
*
* This will perform the multi-replace operation on each string.
*/
template <two_pass Pass = two_pass::SIZE_ONLY>
struct replace_multi_fn {
column_device_view const d_strings;
column_device_view const d_targets;
column_device_view const d_repls;
const int32_t* d_offsets{};
char* d_chars{};
__device__ size_type operator()(size_type idx)
{
if (d_strings.is_null(idx)) return 0;
string_view d_str = d_strings.element<string_view>(idx);
char* out_ptr = nullptr;
if (Pass == two_pass::EXECUTE_OP) out_ptr = d_chars + d_offsets[idx];
const char* in_ptr = d_str.data();
size_type size = d_str.size_bytes();
size_type bytes = size, spos = 0, lpos = 0;
while (spos < size) { // check each character against each target
for (int tgt_idx = 0; tgt_idx < d_targets.size(); ++tgt_idx) {
string_view d_tgt = d_targets.element<string_view>(tgt_idx);
if ((d_tgt.size_bytes() <= (size - spos)) && // check fit
(d_tgt.compare(in_ptr + spos, d_tgt.size_bytes()) == 0)) // does it match
{ // found one
string_view d_repl;
if (d_repls.size() == 1)
d_repl = d_repls.element<string_view>(0);
else
d_repl = d_repls.element<string_view>(tgt_idx);
if (Pass == two_pass::SIZE_ONLY)
bytes += d_repl.size_bytes() - d_tgt.size_bytes();
else {
out_ptr = copy_and_increment(out_ptr, in_ptr + lpos, spos - lpos);
out_ptr = copy_string(out_ptr, d_repl);
lpos = spos + d_tgt.size_bytes();
}
spos += d_tgt.size_bytes() - 1;
break;
}
}
++spos;
}
if (Pass == two_pass::EXECUTE_OP) // copy remainder
memcpy(out_ptr, in_ptr + lpos, size - lpos);
return bytes;
}
};
} // namespace
std::unique_ptr<column> replace(strings_column_view const& strings,
strings_column_view const& targets,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
auto strings_count = strings.size();
if (strings_count == 0) return make_empty_strings_column(mr, stream);
CUDF_EXPECTS(((targets.size() > 0) && (targets.null_count() == 0)),
"Parameters targets must not be empty and must not have nulls");
CUDF_EXPECTS(((repls.size() > 0) && (repls.null_count() == 0)),
"Parameters repls must not be empty and must not have nulls");
if (repls.size() > 1)
CUDF_EXPECTS(repls.size() == targets.size(), "Sizes for targets and repls must match");
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
auto targets_column = column_device_view::create(targets.parent(), stream);
auto d_targets = *targets_column;
auto repls_column = column_device_view::create(repls.parent(), stream);
auto d_repls = *repls_column;
// copy the null mask
rmm::device_buffer null_mask = copy_bitmask(strings.parent(), stream, mr);
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<int32_t>(0),
replace_multi_fn<two_pass::SIZE_ONLY>{d_strings, d_targets, d_repls});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column =
create_chars_child_column(strings_count, strings.null_count(), bytes, mr, stream);
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(
rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
replace_multi_fn<two_pass::EXECUTE_OP>{d_strings, d_targets, d_repls, d_offsets, d_chars});
//
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
strings.null_count(),
std::move(null_mask),
stream,
mr);
}
std::unique_ptr<column> replace_nulls(strings_column_view const& strings,
string_scalar const& repl,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_empty_strings_column(mr, stream);
CUDF_EXPECTS(repl.is_valid(), "Parameter repl must be valid.");
string_view d_repl(repl.data(), repl.size());
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<int32_t>(0), [d_strings, d_repl] __device__(size_type idx) {
return d_strings.is_null(idx) ? d_repl.size_bytes()
: d_strings.element<string_view>(idx).size_bytes();
});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = strings::detail::create_chars_child_column(
strings_count, strings.null_count(), bytes, mr, stream);
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_strings, d_repl, d_offsets, d_chars] __device__(size_type idx) {
string_view d_str = d_repl;
if (!d_strings.is_null(idx)) d_str = d_strings.element<string_view>(idx);
memcpy(d_chars + d_offsets[idx], d_str.data(), d_str.size_bytes());
});
//
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
0,
rmm::device_buffer{0, stream, mr},
stream,
mr);
}
} // namespace detail
// external API
std::unique_ptr<column> replace(strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace(strings, target, repl, maxrepl, mr);
}
std::unique_ptr<column> replace_slice(strings_column_view const& strings,
string_scalar const& repl,
size_type start,
size_type stop,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace_slice(strings, repl, start, stop, mr);
}
std::unique_ptr<column> replace(strings_column_view const& strings,
strings_column_view const& targets,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace(strings, targets, repls, mr);
}
std::unique_ptr<column> replace_nulls(strings_column_view const& strings,
string_scalar const& repl,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace_nulls(strings, repl, mr);
}
} // namespace strings
} // namespace cudf
|
8b0d591d4e9dca7e8fb38ef6a505771e159c04e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SearchByTriplet.cuh"
#include "WeakTracksAdder.cuh"
/**
 * @brief Calculates the straight-line parameters with a weighted least-squares fit
* and returns the chi2.
*/
__device__ float means_square_fit_chi2(
const float* hit_Xs,
const float* hit_Ys,
const float* hit_Zs,
const Velo::TrackletHits& track
) {
Velo::State state;
// Fit parameters
float s0, sx, sz, sxz, sz2;
float u0, uy, uz, uyz, uz2;
s0 = sx = sz = sxz = sz2 = 0.0f;
u0 = uy = uz = uyz = uz2 = 0.0f;
// Iterate over hits
for (unsigned short h=0; h<3; ++h) {
const auto hitno = track.hits[h];
const auto x = hit_Xs[hitno];
const auto y = hit_Ys[hitno];
const auto z = hit_Zs[hitno];
const auto wx = VeloTracking::param_w;
const auto wx_t_x = wx * x;
const auto wx_t_z = wx * z;
s0 += wx;
sx += wx_t_x;
sz += wx_t_z;
sxz += wx_t_x * z;
sz2 += wx_t_z * z;
const auto wy = VeloTracking::param_w;
const auto wy_t_y = wy * y;
const auto wy_t_z = wy * z;
u0 += wy;
uy += wy_t_y;
uz += wy_t_z;
uyz += wy_t_y * z;
uz2 += wy_t_z * z;
}
{
// Calculate tx, ty and backward
const auto dens = 1.0f / (sz2 * s0 - sz * sz);
state.tx = (sxz * s0 - sx * sz) * dens;
state.x = (sx * sz2 - sxz * sz) * dens;
const auto denu = 1.0f / (uz2 * u0 - uz * uz);
state.ty = (uyz * u0 - uy * uz) * denu;
state.y = (uy * uz2 - uyz * uz) * denu;
}
{
//=========================================================================
// Chi2 / degrees-of-freedom of straight-line fit
//=========================================================================
float ch = 0.0f;
int nDoF = -4;
for (uint h=0; h<3; ++h) {
const auto hitno = track.hits[h];
const auto z = hit_Zs[hitno];
const auto x = state.x + state.tx * z;
const auto y = state.y + state.ty * z;
const auto dx = x - hit_Xs[hitno];
const auto dy = y - hit_Ys[hitno];
ch += dx * dx * VeloTracking::param_w + dy * dy * VeloTracking::param_w;
// Nice :)
// TODO: We can get rid of the X and Y read here
// float sum_w_xzi_2 = CL_VeloTracking::param_w * x; // for each hit
// float sum_w_xi_2 = CL_VeloTracking::param_w * hit_Xs[hitno]; // for each hit
// ch = (sum_w_xzi_2 - sum_w_xi_2) + (sum_w_yzi_2 - sum_w_yi_2);
nDoF += 2;
}
state.chi2 = ch / nDoF;
}
return state.chi2;
}
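// Note on the fit above: for each coordinate the loop accumulates the terms of
// the weighted least-squares normal equations for a straight line x(z) = x0 + tx*z,
//
//   [ s0  sz  ] [ x0 ]   [ sx  ]        s0 = sum(w),   sx  = sum(w*x),
//   [ sz  sz2 ] [ tx ] = [ sxz ]        sz = sum(w*z), sxz = sum(w*x*z), sz2 = sum(w*z^2)
//
// and the closed-form solution of that 2x2 system is exactly the
// (sxz*s0 - sx*sz)*dens and (sx*sz2 - sxz*sz)*dens expressions used for
// state.tx and state.x (the y/ty pair is obtained the same way).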
__device__ void weak_tracks_adder_impl(
uint* weaktracks_insert_pointer,
uint* tracks_insert_pointer,
Velo::TrackletHits* weak_tracks,
Velo::TrackHits* tracks,
bool* hit_used,
const float* hit_Xs,
const float* hit_Ys,
const float* hit_Zs
) {
// Compute the weak tracks
const auto weaktracks_total = weaktracks_insert_pointer[0];
for (int i=0; i<(weaktracks_total + blockDim.x - 1) / blockDim.x; ++i) {
const auto weaktrack_no = blockDim.x * i + threadIdx.x;
if (weaktrack_no < weaktracks_total) {
const Velo::TrackletHits& t = weak_tracks[weaktrack_no];
const bool any_used = hit_used[t.hits[0]] || hit_used[t.hits[1]] || hit_used[t.hits[2]];
const float chi2 = means_square_fit_chi2(
hit_Xs,
hit_Ys,
hit_Zs,
t
);
// Store them in the tracks bag
if (!any_used && chi2 < VeloTracking::max_chi2) {
const uint trackno = atomicAdd(tracks_insert_pointer, 1);
assert(trackno < VeloTracking::max_tracks);
tracks[trackno] = Velo::TrackHits{t};
}
}
}
}
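// Implementation notes: the loop above walks the weak-track list in chunks of
// blockDim.x (a block-stride loop), so the function is correct for any block
// size, and the atomicAdd on tracks_insert_pointer serialises the slot
// assignment so concurrent threads never write the same track entry.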
__global__ void weak_tracks_adder(
uint32_t* dev_velo_cluster_container,
uint* dev_module_cluster_start,
Velo::TrackHits* dev_tracks,
Velo::TrackletHits* dev_weak_tracks,
bool* dev_hit_used,
int* dev_atomics_storage
) {
/* Data initialization */
// Each event is treated with two blocks, one for each side.
const uint event_number = blockIdx.x;
const uint number_of_events = gridDim.x;
const uint tracks_offset = event_number * VeloTracking::max_tracks;
// Pointers to data within the event
const uint number_of_hits = dev_module_cluster_start[VeloTracking::n_modules * number_of_events];
const uint* module_hitStarts = dev_module_cluster_start + event_number * VeloTracking::n_modules;
const uint hit_offset = module_hitStarts[0];
assert((module_hitStarts[52] - module_hitStarts[0]) < VeloTracking::max_number_of_hits_per_event);
// Order has changed since SortByPhi
const float* hit_Ys = (float*) (dev_velo_cluster_container + hit_offset);
const float* hit_Zs = (float*) (dev_velo_cluster_container + number_of_hits + hit_offset);
const float* hit_Xs = (float*) (dev_velo_cluster_container + 5 * number_of_hits + hit_offset);
// Per event datatypes
Velo::TrackHits* tracks = dev_tracks + tracks_offset;
uint* tracks_insert_pointer = (uint*) dev_atomics_storage + event_number;
// Per side datatypes
bool* hit_used = dev_hit_used + hit_offset;
Velo::TrackletHits* weak_tracks = dev_weak_tracks + event_number * VeloTracking::max_weak_tracks;
// Initialize variables according to event number and module side
// Insert pointers (atomics)
const int ip_shift = number_of_events + event_number * (VeloTracking::num_atomics - 1);
uint* weaktracks_insert_pointer = (uint*) dev_atomics_storage + ip_shift;
weak_tracks_adder_impl(
weaktracks_insert_pointer,
tracks_insert_pointer,
weak_tracks,
tracks,
hit_used,
hit_Xs,
hit_Ys,
hit_Zs
);
}
| 8b0d591d4e9dca7e8fb38ef6a505771e159c04e5.cu | #include "SearchByTriplet.cuh"
#include "WeakTracksAdder.cuh"
/**
 * @brief Calculates the straight-line parameters with a weighted least-squares fit
* and returns the chi2.
*/
__device__ float means_square_fit_chi2(
const float* hit_Xs,
const float* hit_Ys,
const float* hit_Zs,
const Velo::TrackletHits& track
) {
Velo::State state;
// Fit parameters
float s0, sx, sz, sxz, sz2;
float u0, uy, uz, uyz, uz2;
s0 = sx = sz = sxz = sz2 = 0.0f;
u0 = uy = uz = uyz = uz2 = 0.0f;
// Iterate over hits
for (unsigned short h=0; h<3; ++h) {
const auto hitno = track.hits[h];
const auto x = hit_Xs[hitno];
const auto y = hit_Ys[hitno];
const auto z = hit_Zs[hitno];
const auto wx = VeloTracking::param_w;
const auto wx_t_x = wx * x;
const auto wx_t_z = wx * z;
s0 += wx;
sx += wx_t_x;
sz += wx_t_z;
sxz += wx_t_x * z;
sz2 += wx_t_z * z;
const auto wy = VeloTracking::param_w;
const auto wy_t_y = wy * y;
const auto wy_t_z = wy * z;
u0 += wy;
uy += wy_t_y;
uz += wy_t_z;
uyz += wy_t_y * z;
uz2 += wy_t_z * z;
}
{
// Calculate tx, ty and backward
const auto dens = 1.0f / (sz2 * s0 - sz * sz);
state.tx = (sxz * s0 - sx * sz) * dens;
state.x = (sx * sz2 - sxz * sz) * dens;
const auto denu = 1.0f / (uz2 * u0 - uz * uz);
state.ty = (uyz * u0 - uy * uz) * denu;
state.y = (uy * uz2 - uyz * uz) * denu;
}
{
//=========================================================================
// Chi2 / degrees-of-freedom of straight-line fit
//=========================================================================
float ch = 0.0f;
int nDoF = -4;
for (uint h=0; h<3; ++h) {
const auto hitno = track.hits[h];
const auto z = hit_Zs[hitno];
const auto x = state.x + state.tx * z;
const auto y = state.y + state.ty * z;
const auto dx = x - hit_Xs[hitno];
const auto dy = y - hit_Ys[hitno];
ch += dx * dx * VeloTracking::param_w + dy * dy * VeloTracking::param_w;
// Nice :)
// TODO: We can get rid of the X and Y read here
// float sum_w_xzi_2 = CL_VeloTracking::param_w * x; // for each hit
// float sum_w_xi_2 = CL_VeloTracking::param_w * hit_Xs[hitno]; // for each hit
// ch = (sum_w_xzi_2 - sum_w_xi_2) + (sum_w_yzi_2 - sum_w_yi_2);
nDoF += 2;
}
state.chi2 = ch / nDoF;
}
return state.chi2;
}
__device__ void weak_tracks_adder_impl(
uint* weaktracks_insert_pointer,
uint* tracks_insert_pointer,
Velo::TrackletHits* weak_tracks,
Velo::TrackHits* tracks,
bool* hit_used,
const float* hit_Xs,
const float* hit_Ys,
const float* hit_Zs
) {
// Compute the weak tracks
const auto weaktracks_total = weaktracks_insert_pointer[0];
for (int i=0; i<(weaktracks_total + blockDim.x - 1) / blockDim.x; ++i) {
const auto weaktrack_no = blockDim.x * i + threadIdx.x;
if (weaktrack_no < weaktracks_total) {
const Velo::TrackletHits& t = weak_tracks[weaktrack_no];
const bool any_used = hit_used[t.hits[0]] || hit_used[t.hits[1]] || hit_used[t.hits[2]];
const float chi2 = means_square_fit_chi2(
hit_Xs,
hit_Ys,
hit_Zs,
t
);
// Store them in the tracks bag
if (!any_used && chi2 < VeloTracking::max_chi2) {
const uint trackno = atomicAdd(tracks_insert_pointer, 1);
assert(trackno < VeloTracking::max_tracks);
tracks[trackno] = Velo::TrackHits{t};
}
}
}
}
__global__ void weak_tracks_adder(
uint32_t* dev_velo_cluster_container,
uint* dev_module_cluster_start,
Velo::TrackHits* dev_tracks,
Velo::TrackletHits* dev_weak_tracks,
bool* dev_hit_used,
int* dev_atomics_storage
) {
/* Data initialization */
// Each event is treated with two blocks, one for each side.
const uint event_number = blockIdx.x;
const uint number_of_events = gridDim.x;
const uint tracks_offset = event_number * VeloTracking::max_tracks;
// Pointers to data within the event
const uint number_of_hits = dev_module_cluster_start[VeloTracking::n_modules * number_of_events];
const uint* module_hitStarts = dev_module_cluster_start + event_number * VeloTracking::n_modules;
const uint hit_offset = module_hitStarts[0];
assert((module_hitStarts[52] - module_hitStarts[0]) < VeloTracking::max_number_of_hits_per_event);
// Order has changed since SortByPhi
const float* hit_Ys = (float*) (dev_velo_cluster_container + hit_offset);
const float* hit_Zs = (float*) (dev_velo_cluster_container + number_of_hits + hit_offset);
const float* hit_Xs = (float*) (dev_velo_cluster_container + 5 * number_of_hits + hit_offset);
// Per event datatypes
Velo::TrackHits* tracks = dev_tracks + tracks_offset;
uint* tracks_insert_pointer = (uint*) dev_atomics_storage + event_number;
// Per side datatypes
bool* hit_used = dev_hit_used + hit_offset;
Velo::TrackletHits* weak_tracks = dev_weak_tracks + event_number * VeloTracking::max_weak_tracks;
// Initialize variables according to event number and module side
// Insert pointers (atomics)
const int ip_shift = number_of_events + event_number * (VeloTracking::num_atomics - 1);
uint* weaktracks_insert_pointer = (uint*) dev_atomics_storage + ip_shift;
weak_tracks_adder_impl(
weaktracks_insert_pointer,
tracks_insert_pointer,
weak_tracks,
tracks,
hit_used,
hit_Xs,
hit_Ys,
hit_Zs
);
}
|
1110353f44243fe224d8150a1de6b6810a4ab057.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <math.h>
#include <fstream>
#include <hip/hip_runtime.h>
//#define flouble float
#define flouble double
#define MAXITERATIONS 20000
using namespace std;
void aufg13a();
flouble* initMatrixRightHandSide(int n, flouble h );
flouble* jacobiIter(int n, flouble *f, flouble valBoundary, int* numberOfIterations, flouble h);
void aufg13b();
flouble* jacobiIterCuda_CPU(int n, flouble *f, flouble valBoundary, int* numberOfIterations, flouble h);
__global__ void initMatrixRightHandSideCuda_CUDA(flouble h, flouble* matrix);
__global__ void initSolutionVectors_CUDA(flouble *actualIteration, flouble valBoundary);
__global__ void jacoboIteration_CUDA(flouble *actualIteration, flouble *lastIterSol, int n, flouble valSubDiag,
flouble valMainDiag, flouble *f);
__global__ void calculateResidual_CUDA(double *a, double *b, double *c);
__global__ void calculateResidual_CUDA(float *a, float *b, float *c);
void aufg13c();
void aufg13d();
// Utility
flouble* initMatrixKonstant(int m,int n, flouble value ) ;
void displayMyMatrix(flouble* matrix, int m,int n);
void saveMyMatrix(flouble* matrix, int m,int n, flouble h);
int main() {
std::cout << "Hello, World!" << std::endl;
aufg13b();
return 0;
}
// _________________________________________________________________________________________ //
//
// Aufgabe 13a
// _________________________________________________________________________________________ //
void aufg13a() {
int n=1024;
flouble h = 1./(n-1);
flouble boundaryValue=0;
flouble *fun;
flouble *result;
int doneIterations=0;
fun=initMatrixRightHandSide(n,h);
result=jacobiIter(n, fun, boundaryValue, &doneIterations,h);
saveMyMatrix(result, n,n,h);
delete(fun);
delete(result);
}
flouble* jacobiIter(int n, flouble *f, flouble valBoundary, int* numberOfIterations, flouble h) {
flouble* actualIteration=new flouble[n*n]();
flouble* lastIterSol=new flouble[n*n]();
flouble *temp;
flouble tol=0.0001;
int iteration=0;
flouble resi=tol+1;
int step=100;
flouble hsquare=h*h;
flouble valLowBlockDiag=-1/hsquare;
flouble valUpBlockDiag=-1/hsquare;
flouble valLowMinDiag=-1/hsquare;
flouble valUpDiag=-1/hsquare;
flouble valMainDiag=4/hsquare;
// boundary values init (outer)
for(int i=0;i<n;i++) {
actualIteration[i]=valBoundary;
lastIterSol[i]=valBoundary;
actualIteration[n*(n-1)+i]=valBoundary;
lastIterSol[n*(n-1)+i]=valBoundary;
}
for(int k=1;k<n-1;k++) { // iterate through blocks
actualIteration[k*n]=valBoundary;
lastIterSol[k*n]=valBoundary;
actualIteration[(k+1)*n-1]=valBoundary;
lastIterSol[(k+1)*n-1]=valBoundary;
}
int nm1=n-1;
int index;
while(iteration<MAXITERATIONS&&resi>tol) {
// consecutive blocks
for(int k=1;k<nm1;k++) { // iterate through blocks
for(int i=1;i<nm1;i++) { // iterate in block
index=k*n+i;
actualIteration[index]=1/valMainDiag*(f[index]-valLowBlockDiag*lastIterSol[index-n]-valLowMinDiag*lastIterSol[index-1]-valUpDiag*lastIterSol[index+1]-valUpBlockDiag*lastIterSol[index+n]);
}
}
if (!(iteration % step)) {
resi=0;
for(int i=0;i<n*n;i++) {
resi+=fabs(actualIteration[i]- lastIterSol[i]);
}
//std::cout << iteration <<": "<< resi<< std::endl;
}
temp=lastIterSol;
lastIterSol=actualIteration;
actualIteration=temp;
iteration++;
}
std::cout << "Calculation finished after "<<iteration<<" Iterations.(%"<<step<<")"<<std::endl;
*numberOfIterations=iteration;
delete(lastIterSol);
return actualIteration;
}
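// With valMainDiag = 4/h^2 and every off-diagonal coefficient equal to -1/h^2,
// the update inside the loop is the classic 5-point Jacobi sweep for the
// Poisson problem -Laplace(u) = f:
//
//   u_new[k][i] = ( h^2 * f[k][i]
//                   + u[k-1][i] + u[k+1][i] + u[k][i-1] + u[k][i+1] ) / 4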
flouble* initMatrixRightHandSide(int n, flouble h ) {
flouble*matrix=new flouble[n*n];
flouble x;
flouble y;
for (int i=0;i<n;i++) {
for (int j=0;j<n;j++) {
x=h*i;
y=h*j;
matrix[i*n+j]=x*(1-x)+y*(1-y);
// printf("<%f %f> %f\n",x,y,matrix[i*m+j]);
}
}
return matrix;
}
// _________________________________________________________________________________________ //
//
// Aufgabe 13b
// _________________________________________________________________________________________ //
void aufg13b() {
int n=1024;
int nn=n*n;
flouble h = 1./(n-1);
flouble boundaryValue=0;
flouble *cuda_fun;
hipMalloc(&cuda_fun,sizeof(flouble)*nn);
flouble *result=new flouble[nn];
int doneIterations=0;
hipLaunchKernelGGL(( initMatrixRightHandSideCuda_CUDA), dim3(n),dim3(n), 0, 0, h,cuda_fun);
result=jacobiIterCuda_CPU(n, cuda_fun, boundaryValue, &doneIterations,h);
hipDeviceReset();
saveMyMatrix(result, n,n,1);
}
flouble* jacobiIterCuda_CPU(int n, flouble *cudaF, flouble valBoundary, int* numberOfIterations, flouble h) {
int nn=n*n;
flouble* actualIteration=new flouble[nn]();
flouble *cuda_actualIteration, *cuda_lastIterSol;
hipMalloc(&cuda_actualIteration,sizeof(flouble)*nn);;
hipMalloc(&cuda_lastIterSol,sizeof(flouble)*nn);;
hipLaunchKernelGGL(( initSolutionVectors_CUDA) , dim3(n),dim3(n), 0, 0, cuda_actualIteration, valBoundary);
flouble tol=0.0001;
int iteration=0;
flouble resi=tol+1;
flouble *resiCuda;
hipMalloc(&resiCuda,sizeof(flouble));
hipMemset(resiCuda, 0, sizeof(flouble)); // zero the device accumulator before its first atomicAdd-based reduction
int step=100; // residual is checked every `step` loop passes (each pass runs 2 Jacobi sweeps)
int maxDoubleIter=MAXITERATIONS/2;
flouble hsquare=h*h;
flouble valSubDiag=-1/hsquare;
flouble valMainDiag=4/hsquare;
while(iteration<maxDoubleIter) {
// consecutive blocks
hipLaunchKernelGGL(( jacoboIteration_CUDA) , dim3(n),dim3(n), 0, 0, cuda_actualIteration,cuda_lastIterSol,n,valSubDiag,valMainDiag,cudaF);
hipLaunchKernelGGL(( jacoboIteration_CUDA) , dim3(n),dim3(n), 0, 0, cuda_lastIterSol,cuda_actualIteration,n,valSubDiag,valMainDiag,cudaF);
iteration++;
if(iteration%step==0) {
hipLaunchKernelGGL(( calculateResidual_CUDA) , dim3(n),dim3(n), 0, 0, cuda_actualIteration, cuda_lastIterSol, resiCuda);
hipMemcpy(&resi,resiCuda,sizeof(flouble),hipMemcpyDeviceToHost);
cout<<iteration*2<<": "<<resi<<endl;
if(resi<tol) {
break;
}
resi=0; // Reset resiCuda.....is there any better way?
hipMemcpy(resiCuda,&resi,sizeof(flouble),hipMemcpyHostToDevice);
}
}
std::cout << "Calculation finished after "<<2*iteration<<" Iterations.(%"<<step<<")"<<std::endl;
*numberOfIterations=iteration*2;
hipMemcpy(actualIteration,cuda_actualIteration, sizeof(flouble)*nn, hipMemcpyDeviceToHost);
return actualIteration;
}
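// Note: launching the Jacobi kernel twice per loop pass with the two device
// buffers swapped (actual <-> last) performs two sweeps without any host-side
// pointer swap or extra copy; the residual is therefore evaluated only every
// `step` passes, i.e. every 2*step sweeps.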
__global__ void initMatrixRightHandSideCuda_CUDA(flouble h, flouble* matrix) {
// Version for n==1024
int tid=threadIdx.x;
int bid=blockIdx.x;
flouble x=h*bid;
flouble y=h*tid;
matrix[bid*blockDim.x+tid]=x*(1-x)+y*(1-y);
}
__global__ void initSolutionVectors_CUDA(flouble *actualIteration, flouble valBoundary) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int n = blockDim.x;
if ((bid == 0)||(bid == n-1)) { // boundary values init (outer)
actualIteration[n * bid + tid] = valBoundary;
} else {
if((tid==0)||tid==n-1) {
actualIteration[n * bid + tid] = valBoundary;
}else {
actualIteration[bid*n+tid] = 0;
}
}
}
__global__ void jacoboIteration_CUDA(flouble *actualIteration, flouble *lastIterSol, int n, flouble valSubDiag,
flouble valMainDiag, flouble *f) {
int index; //index=k*n+i;
int tid=threadIdx.x;
int bid=blockIdx.x;
int bdim=blockDim.x;
if(bid==0||bid==gridDim.x-1) { // Boundaries, nothing to do here
return;
}
if(tid==0||tid==gridDim.x-1) { // Boundaries, nothing to do here
return;
}
index=bid*bdim+tid;
actualIteration[index]=1/valMainDiag*(f[index]-valSubDiag*lastIterSol[index-bdim]-valSubDiag*lastIterSol[index-1]-valSubDiag*lastIterSol[index+1]-valSubDiag*lastIterSol[index+bdim]);
}
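// Both initSolutionVectors_CUDA and jacoboIteration_CUDA are written for the
// square launch used in this file (n blocks of n threads, so
// gridDim.x == blockDim.x == n); that is why gridDim.x and blockDim.x are
// used interchangeably in the boundary tests above.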
__global__ void calculateResidual_CUDA(float *a, float *b, float *c) {
__shared__ float se[1024];
int tid=threadIdx.x;
int bid=blockIdx.x;
int n=blockDim.x;
// Element-wise absolute difference |a - b|
se[tid]=fabsf(a[tid+bid*n]-b[tid+bid*n]);
__syncthreads();
// Tree reduction: sum the per-thread differences in shared memory
int numActiveThreads=n/2;
while(numActiveThreads>0) {
if(tid<numActiveThreads) {
se[tid]=se[tid]+se[tid+numActiveThreads];
}
numActiveThreads=numActiveThreads/2;
__syncthreads();
}
if(tid==0) {
atomicAdd(c,se[0]);
}
}
__global__ void calculateResidual_CUDA(double *a, double *b, double *c) {
__shared__ double se[1024];
int tid=threadIdx.x;
int bid=blockIdx.x;
int n=blockDim.x;
// Element-wise absolute difference |a - b|
se[tid]=fabs(a[tid+bid*n]-b[tid+bid*n]); // use fabs here: fabsf would round the doubles to float
__syncthreads();
// Tree reduction: sum the per-thread differences in shared memory
int numActiveThreads=n/2;
while(numActiveThreads>0) {
if(tid<numActiveThreads) {
se[tid]=se[tid]+se[tid+numActiveThreads];
}
numActiveThreads=numActiveThreads/2;
__syncthreads();
}
if(tid==0) {
atomicAdd(c,se[0]);
}
}
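// Both reduction kernels above rely on blockDim.x being a power of two (1024
// in this program) so the repeated halving reaches every element; note also
// that, when building for NVIDIA GPUs, the double-precision atomicAdd used
// here requires compute capability 6.0 or newer.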
// Utility functions
flouble* initMatrixKonstant(int m,int n, flouble value ) {
flouble*matrix=new flouble[n*m];
for (int i=0;i<m;i++) {
for (int j=0;j<n;j++) {
matrix[i*m+j]=value;
}
}
return matrix;
}
void displayMyMatrix(flouble* matrix, int m,int n) {
printf(" \n");
for (int i=0;i<m;i++) {
for (int j=0;j<n;j++) {
//printf("<%d %d %f>",i,j,matrix[i*m+j]);
printf("%f ",matrix[i*m+j]);
}
printf(" \n");
}
}
void saveMyMatrix(flouble* matrix, int m,int n, flouble h) {
// pass h=1 to save array indices instead of physical coordinates
std::ofstream myfile;
myfile.open ("./results.dat");
flouble x;
flouble y;
for (int i=0;i<m;i++) {
for (int j=0;j<n;j++) {
x=h*i;
y=h*j;
// printf("<%d %d %f>",x,y,matrix[i*m+j]);
myfile<<x<<" "<<y<<" "<<matrix[i*m+j]<<"\n";
}
myfile<<std::endl;
// printf(" \n");
}
myfile.close();
}
| 1110353f44243fe224d8150a1de6b6810a4ab057.cu | #include <iostream>
#include <math.h>
#include <fstream>
#include <cuda.h>
//#define flouble float
#define flouble double
#define MAXITERATIONS 20000
using namespace std;
void aufg13a();
flouble* initMatrixRightHandSide(int n, flouble h );
flouble* jacobiIter(int n, flouble *f, flouble valBoundary, int* numberOfIterations, flouble h);
void aufg13b();
flouble* jacobiIterCuda_CPU(int n, flouble *f, flouble valBoundary, int* numberOfIterations, flouble h);
__global__ void initMatrixRightHandSideCuda_CUDA(flouble h, flouble* matrix);
__global__ void initSolutionVectors_CUDA(flouble *actualIteration, flouble valBoundary);
__global__ void jacoboIteration_CUDA(flouble *actualIteration, flouble *lastIterSol, int n, flouble valSubDiag,
flouble valMainDiag, flouble *f);
__global__ void calculateResidual_CUDA(double *a, double *b, double *c);
__global__ void calculateResidual_CUDA(float *a, float *b, float *c);
void aufg13c();
void aufg13d();
// Utility
flouble* initMatrixKonstant(int m,int n, flouble value ) ;
void displayMyMatrix(flouble* matrix, int m,int n);
void saveMyMatrix(flouble* matrix, int m,int n, flouble h);
int main() {
std::cout << "Hello, World!" << std::endl;
aufg13b();
return 0;
}
// _________________________________________________________________________________________ //
//
// Aufgabe 13a
// _________________________________________________________________________________________ //
void aufg13a() {
int n=1024;
flouble h = 1./(n-1);
flouble boundaryValue=0;
flouble *fun;
flouble *result;
int doneIterations=0;
fun=initMatrixRightHandSide(n,h);
result=jacobiIter(n, fun, boundaryValue, &doneIterations,h);
saveMyMatrix(result, n,n,h);
delete(fun);
delete(result);
}
flouble* jacobiIter(int n, flouble *f, flouble valBoundary, int* numberOfIterations, flouble h) {
flouble* actualIteration=new flouble[n*n]();
flouble* lastIterSol=new flouble[n*n]();
flouble *temp;
flouble tol=0.0001;
int iteration=0;
flouble resi=tol+1;
int step=100;
flouble hsquare=h*h;
flouble valLowBlockDiag=-1/hsquare;
flouble valUpBlockDiag=-1/hsquare;
flouble valLowMinDiag=-1/hsquare;
flouble valUpDiag=-1/hsquare;
flouble valMainDiag=4/hsquare;
// boundary values init (outer)
for(int i=0;i<n;i++) {
actualIteration[i]=valBoundary;
lastIterSol[i]=valBoundary;
actualIteration[n*(n-1)+i]=valBoundary;
lastIterSol[n*(n-1)+i]=valBoundary;
}
for(int k=1;k<n-1;k++) { // iterate through blocks
actualIteration[k*n]=valBoundary;
lastIterSol[k*n]=valBoundary;
actualIteration[(k+1)*n-1]=valBoundary;
lastIterSol[(k+1)*n-1]=valBoundary;
}
int nm1=n-1;
int index;
while(iteration<MAXITERATIONS&&resi>tol) {
// consecutive blocks
for(int k=1;k<nm1;k++) { // iterate through blocks
for(int i=1;i<nm1;i++) { // iterate in block
index=k*n+i;
actualIteration[index]=1/valMainDiag*(f[index]-valLowBlockDiag*lastIterSol[index-n]-valLowMinDiag*lastIterSol[index-1]-valUpDiag*lastIterSol[index+1]-valUpBlockDiag*lastIterSol[index+n]);
}
}
if (!(iteration % step)) {
resi=0;
for(int i=0;i<n*n;i++) {
resi+=fabs(actualIteration[i]- lastIterSol[i]);
}
//std::cout << iteration <<": "<< resi<< std::endl;
}
temp=lastIterSol;
lastIterSol=actualIteration;
actualIteration=temp;
iteration++;
}
std::cout << "Calculation finished after "<<iteration<<" Iterations.(%"<<step<<")"<<std::endl;
*numberOfIterations=iteration;
delete(lastIterSol);
return actualIteration;
}
flouble* initMatrixRightHandSide(int n, flouble h ) {
flouble*matrix=new flouble[n*n];
flouble x;
flouble y;
for (int i=0;i<n;i++) {
for (int j=0;j<n;j++) {
x=h*i;
y=h*j;
matrix[i*n+j]=x*(1-x)+y*(1-y);
// printf("<%f %f> %f\n",x,y,matrix[i*m+j]);
}
}
return matrix;
}
// _________________________________________________________________________________________ //
//
// Aufgabe 13b
// _________________________________________________________________________________________ //
void aufg13b() {
int n=1024;
int nn=n*n;
flouble h = 1./(n-1);
flouble boundaryValue=0;
flouble *cuda_fun;
cudaMalloc(&cuda_fun,sizeof(flouble)*nn);
flouble *result=new flouble[nn];
int doneIterations=0;
initMatrixRightHandSideCuda_CUDA<<<n,n>>>(h,cuda_fun);
result=jacobiIterCuda_CPU(n, cuda_fun, boundaryValue, &doneIterations,h);
cudaThreadExit();
saveMyMatrix(result, n,n,1);
}
flouble* jacobiIterCuda_CPU(int n, flouble *cudaF, flouble valBoundary, int* numberOfIterations, flouble h) {
int nn=n*n;
flouble* actualIteration=new flouble[nn]();
flouble *cuda_actualIteration, *cuda_lastIterSol;
cudaMalloc(&cuda_actualIteration,sizeof(flouble)*nn);;
cudaMalloc(&cuda_lastIterSol,sizeof(flouble)*nn);;
initSolutionVectors_CUDA <<<n,n>>> (cuda_actualIteration, valBoundary);
flouble tol=0.0001;
int iteration=0;
flouble resi=tol+1;
flouble *resiCuda;
cudaMalloc(&resiCuda,sizeof(flouble));
cudaMemset(resiCuda, 0, sizeof(flouble)); // zero the device accumulator before its first atomicAdd-based reduction
int step=100; // residual is checked every `step` loop passes (each pass runs 2 Jacobi sweeps)
int maxDoubleIter=MAXITERATIONS/2;
flouble hsquare=h*h;
flouble valSubDiag=-1/hsquare;
flouble valMainDiag=4/hsquare;
while(iteration<maxDoubleIter) {
// consecutive blocks
jacoboIteration_CUDA <<<n,n>>>(cuda_actualIteration,cuda_lastIterSol,n,valSubDiag,valMainDiag,cudaF);
jacoboIteration_CUDA <<<n,n>>>(cuda_lastIterSol,cuda_actualIteration,n,valSubDiag,valMainDiag,cudaF);
iteration++;
if(iteration%step==0) {
calculateResidual_CUDA <<<n,n>>>(cuda_actualIteration, cuda_lastIterSol, resiCuda);
cudaMemcpy(&resi,resiCuda,sizeof(flouble),cudaMemcpyDeviceToHost);
cout<<iteration*2<<": "<<resi<<endl;
if(resi<tol) {
break;
}
resi=0; // Reset resiCuda.....is there any better way?
cudaMemcpy(resiCuda,&resi,sizeof(flouble),cudaMemcpyHostToDevice);
}
}
std::cout << "Calculation finished after "<<2*iteration<<" Iterations.(%"<<step<<")"<<std::endl;
*numberOfIterations=iteration*2;
cudaMemcpy(actualIteration,cuda_actualIteration, sizeof(flouble)*nn, cudaMemcpyDeviceToHost);
return actualIteration;
}
__global__ void initMatrixRightHandSideCuda_CUDA(flouble h, flouble* matrix) {
// Version for n==1024
int tid=threadIdx.x;
int bid=blockIdx.x;
flouble x=h*bid;
flouble y=h*tid;
matrix[bid*blockDim.x+tid]=x*(1-x)+y*(1-y);
}
__global__ void initSolutionVectors_CUDA(flouble *actualIteration, flouble valBoundary) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int n = blockDim.x;
if ((bid == 0)||(bid == n-1)) { // boundary values init (outer)
actualIteration[n * bid + tid] = valBoundary;
} else {
if((tid==0)||tid==n-1) {
actualIteration[n * bid + tid] = valBoundary;
}else {
actualIteration[bid*n+tid] = 0;
}
}
}
__global__ void jacoboIteration_CUDA(flouble *actualIteration, flouble *lastIterSol, int n, flouble valSubDiag,
flouble valMainDiag, flouble *f) {
int index; //index=k*n+i;
int tid=threadIdx.x;
int bid=blockIdx.x;
int bdim=blockDim.x;
if(bid==0||bid==gridDim.x-1) { // Boundaries, nothing to do here
return;
}
if(tid==0||tid==blockDim.x-1) { // Boundaries (first/last column), nothing to do here
return;
}
index=bid*bdim+tid;
actualIteration[index]=1/valMainDiag*(f[index]-valSubDiag*lastIterSol[index-bdim]-valSubDiag*lastIterSol[index-1]-valSubDiag*lastIterSol[index+1]-valSubDiag*lastIterSol[index+bdim]);
}
__global__ void calculateResidual_CUDA(float *a, float *b, float *c) {
__shared__ float se[1024];
int tid=threadIdx.x;
int bid=blockIdx.x;
int n=blockDim.x;
// elementwise |a - b| for this block's row
se[tid]=fabsf(a[tid+bid*n]-b[tid+bid*n]);
__syncthreads();
// sum reduction in shared memory
int numActiveThreads=n/2;
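// halving tree reduction over the shared array; assumes blockDim.x (== n) is a power of two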
while(numActiveThreads>0) {
if(tid<numActiveThreads) {
se[tid]=se[tid]+se[tid+numActiveThreads];
}
numActiveThreads=numActiveThreads/2;
__syncthreads();
}
if(tid==0) {
atomicAdd(c,se[0]);
}
}
__global__ void calculateResidual_CUDA(double *a, double *b, double *c) {
__shared__ double se[1024];
int tid=threadIdx.x;
int bid=blockIdx.x;
int n=blockDim.x;
// elementwise |a - b| for this block's row
se[tid]=fabs(a[tid+bid*n]-b[tid+bid*n]);
__syncthreads();
// sum reduction in shared memory
int numActiveThreads=n/2;
while(numActiveThreads>0) {
if(tid<numActiveThreads) {
se[tid]=se[tid]+se[tid+numActiveThreads];
}
numActiveThreads=numActiveThreads/2;
__syncthreads();
}
if(tid==0) {
atomicAdd(c,se[0]);
}
}
// Utility functions
flouble* initMatrixKonstant(int m,int n, flouble value ) {
flouble*matrix=new flouble[n*m];
for (int i=0;i<m;i++) {
for (int j=0;j<n;j++) {
matrix[i*n+j]=value; // row-major: i runs over the m rows, j over the n columns
}
}
return matrix;
}
void displayMyMatrix(flouble* matrix, int m,int n) {
printf(" \n");
for (int i=0;i<m;i++) {
for (int j=0;j<n;j++) {
//printf("<%d %d %f>",i,j,matrix[i*n+j]);
printf("%f ",matrix[i*n+j]);
}
printf(" \n");
}
}
void saveMyMatrix(flouble* matrix, int m,int n, flouble h) {
// h=1 for save indices
std::ofstream myfile;
myfile.open ("./results.dat");
flouble x;
flouble y;
for (int i=0;i<m;i++) {
for (int j=0;j<n;j++) {
x=h*i;
y=h*j;
// printf("<%d %d %f>",x,y,matrix[i*n+j]);
myfile<<x<<" "<<y<<" "<<matrix[i*n+j]<<"\n";
}
myfile<<std::endl;
// printf(" \n");
}
myfile.close();
}
|
b79e10c87621b7c311e5a13cc69c5b76e714d08e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/feed_forward.h"
//this function will determine the output of a single layer in the network
//needs the number of blocks and threads equal to the number of neurons and weights respectively
__global__ void eval_layer(int num_threads, int layer, int num_weights, int num_neurons, float *input, float *weights, float *outputs)
{
//each block will correspond to one neuron
int neuron_index = blockIdx.x;
//each thread will correspond to a weight of a neuron
int weight_index = threadIdx.x;
//we will need two indexes if there are not enough threads to execute each weight
int weight_index2 = 0;
if(num_threads < num_weights){
weight_index2 = weight_index + num_threads;
}
__shared__ float output_reduce[SHARED_ARRAY_SIZE];
__syncthreads();
for (int i = weight_index; i < SHARED_ARRAY_SIZE; i+=num_threads){
output_reduce[i]=(float)0.0;
}
__syncthreads();
//__shared__ float layer_input[MAX_NUM_WEIGHTS];
int neuron_weight_index = num_weights*MAX_NUM_NEURONS*layer + num_weights*neuron_index + weight_index;
int neuron_weight_index2 = num_weights*MAX_NUM_NEURONS*layer + num_weights*neuron_index + weight_index2;
if(neuron_index < num_neurons && weight_index < num_weights){
if(layer == 0){//first layer so read from the input data
output_reduce[weight_index] = weights[neuron_weight_index]*input[weight_index];
if(weight_index2 && weight_index2 < num_weights){
output_reduce[weight_index2] = weights[neuron_weight_index2]*input[weight_index2];
}
}
else{
output_reduce[weight_index] = weights[neuron_weight_index]*outputs[(layer-1)*MAX_NUM_NEURONS+weight_index];
if(weight_index2 && weight_index2 < num_weights){
output_reduce[weight_index2] = weights[neuron_weight_index2]*outputs[(layer-1)*MAX_NUM_NEURONS+weight_index2];
}
}
//calculated all the weights for each neuron, so now sync
__syncthreads();
//reduce all the weights for each neuron
for (int s = (int)SHARED_ARRAY_SIZE/2; s > 0; s>>=1) {
if(weight_index < s){
output_reduce[weight_index] += output_reduce[weight_index + s];
}
if(weight_index2 && weight_index2 < s){
output_reduce[weight_index2] += output_reduce[weight_index2 + s];
}
__syncthreads();
}
__syncthreads();
if(weight_index == 0){
//set the next layer's input to the current layer's output
//outputs[layer*MAX_NUM_NEURONS + neuron_index] = output_reduce[0];
outputs[layer*MAX_NUM_NEURONS + neuron_index] = (float)tanh(output_reduce[0]);
if(outputs[layer*MAX_NUM_NEURONS + neuron_index] <= (float)-0.8){
outputs[layer*MAX_NUM_NEURONS + neuron_index] = (float)-0.8;
}
else if(outputs[layer*MAX_NUM_NEURONS + neuron_index] >= (float) 0.8){
outputs[layer*MAX_NUM_NEURONS + neuron_index] = (float)0.8;
}
}
__syncthreads();
}
}
__global__ void normalize_inputs(int num_threads, float *input, int size)
{
int id = threadIdx.x;
int index;
int iterator = 0;
for(iterator = 0; iterator < (int)size/num_threads+1; iterator++){
index = id + num_threads*iterator;
if(index < size){
input[index] = (((float)input[index])*1.6)/255.0 - (float)0.8;//scale the input data to -0.8 to 0.8
}
__syncthreads();
}
}
| b79e10c87621b7c311e5a13cc69c5b76e714d08e.cu | #include "../include/feed_forward.h"
//this function will determine the output of a single layer in the network
//needs the number of blocks and threads equal to the number of neurons and weights respectively
__global__ void eval_layer(int num_threads, int layer, int num_weights, int num_neurons, float *input, float *weights, float *outputs)
{
//each block will correspond to one neuron
int neuron_index = blockIdx.x;
//each thread will correspond to a weight of a neuron
int weight_index = threadIdx.x;
//we will need two indexes if there are not enough threads to execute each weight
int weight_index2 = 0;
if(num_threads < num_weights){
weight_index2 = weight_index + num_threads;
}
__shared__ float output_reduce[SHARED_ARRAY_SIZE];
__syncthreads();
for (int i = weight_index; i < SHARED_ARRAY_SIZE; i+=num_threads){
output_reduce[i]=(float)0.0;
}
__syncthreads();
//__shared__ float layer_input[MAX_NUM_WEIGHTS];
int neuron_weight_index = num_weights*MAX_NUM_NEURONS*layer + num_weights*neuron_index + weight_index;
int neuron_weight_index2 = num_weights*MAX_NUM_NEURONS*layer + num_weights*neuron_index + weight_index2;
if(neuron_index < num_neurons && weight_index < num_weights){
if(layer == 0){//first layer so read from the input data
output_reduce[weight_index] = weights[neuron_weight_index]*input[weight_index];
if(weight_index2 && weight_index2 < num_weights){
output_reduce[weight_index2] = weights[neuron_weight_index2]*input[weight_index2];
}
}
else{
output_reduce[weight_index] = weights[neuron_weight_index]*outputs[(layer-1)*MAX_NUM_NEURONS+weight_index];
if(weight_index2 && weight_index2 < num_weights){
output_reduce[weight_index2] = weights[neuron_weight_index2]*outputs[(layer-1)*MAX_NUM_NEURONS+weight_index2];
}
}
//calculated all the weights for each neuron, so now sync
__syncthreads();
//reduce all the weights for each neuron
for (int s = (int)SHARED_ARRAY_SIZE/2; s > 0; s>>=1) {
if(weight_index < s){
output_reduce[weight_index] += output_reduce[weight_index + s];
}
if(weight_index2 && weight_index2 < s){
output_reduce[weight_index2] += output_reduce[weight_index2 + s];
}
__syncthreads();
}
__syncthreads();
if(weight_index == 0){
//set the next layer's input to the current layer's output
//outputs[layer*MAX_NUM_NEURONS + neuron_index] = output_reduce[0];
outputs[layer*MAX_NUM_NEURONS + neuron_index] = (float)tanh(output_reduce[0]);
if(outputs[layer*MAX_NUM_NEURONS + neuron_index] <= (float)-0.8){
outputs[layer*MAX_NUM_NEURONS + neuron_index] = (float)-0.8;
}
else if(outputs[layer*MAX_NUM_NEURONS + neuron_index] >= (float) 0.8){
outputs[layer*MAX_NUM_NEURONS + neuron_index] = (float)0.8;
}
}
__syncthreads();
}
}
__global__ void normalize_inputs(int num_threads, float *input, int size)
{
int id = threadIdx.x;
int index;
int iterator = 0;
for(iterator = 0; iterator < (int)size/num_threads+1; iterator++){
index = id + num_threads*iterator;
if(index < size){
input[index] = (((float)input[index])*1.6)/255.0 - (float)0.8;//scale the input data to -0.8 to 0.8
}
__syncthreads();
}
}
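// --- Hypothetical usage sketch (not part of the original source) ---
// eval_layer expects gridDim.x == number of neurons and blockDim.x == min(number
// of weights, hardware thread limit); the weight_index2 path covers one thread
// handling two weights. The sizes, pointer names and driver loop below are
// assumptions for illustration only and must respect SHARED_ARRAY_SIZE,
// MAX_NUM_NEURONS and MAX_NUM_WEIGHTS from feed_forward.h.
//
//   int num_layers = 3, num_neurons = 64, num_weights = 784;
//   int threads = num_weights > 1024 ? 1024 : num_weights;
//   normalize_inputs<<<1, 256>>>(256, d_input, num_weights); // scale raw pixels to [-0.8, 0.8]
//   for (int layer = 0; layer < num_layers; ++layer) {
//     eval_layer<<<num_neurons, threads>>>(threads, layer, num_weights,
//                                          num_neurons, d_input, d_weights, d_outputs);
//   }
//   cudaDeviceSynchronize();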
|
5d1932ed39044e2e0be771a38bc9cad475c73712.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/PointSift_cuda.h"
#include <stdio.h>
__global__ void cubeselect(int n,float radius, const float* xyz, int* idx_out)
{
int batch_idx = blockIdx.x;
xyz += batch_idx * n * 3;
idx_out += batch_idx * n * 8;
float temp_dist[8];
float judge_dist = radius * radius;
for(int i = threadIdx.x; i < n;i += blockDim.x) {
float x = xyz[i * 3];
float y = xyz[i * 3 + 1];
float z = xyz[i * 3 + 2];
for(int j = 0;j < 8;j ++) {
temp_dist[j] = 1e8;
idx_out[i * 8 + j] = i; // if not found, just return itself..
}
for(int j = 0;j < n;j ++) {
if(i != j){
float tx = xyz[j * 3];
float ty = xyz[j * 3 + 1];
float tz = xyz[j * 3 + 2];
float dist = (x - tx) * (x - tx) + (y - ty) * (y - ty) + (z - tz) * (z - tz);
if(dist <= judge_dist){
int _x = (tx > x);
int _y = (ty > y);
int _z = (tz > z);
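// combine the three comparison bits into a 3-bit octant code (0-7); the nearest
// neighbour found in each of the 8 axis-aligned octants around point i is kept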
int temp_idx = _x * 4 + _y * 2 + _z;
if(dist < temp_dist[temp_idx]) {
idx_out[i * 8 + temp_idx] = j;
temp_dist[temp_idx] = dist;
}
}
}
}
}
}
// input: points (b,n,c), idx (b,m,nsample)
// output: out (b,m,nsample,c)
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out)
{
int batch_index = blockIdx.x;
points += n*c*batch_index;
idx += m*nsample*batch_index;
out += m*nsample*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
out[j*nsample*c+k*c+l] = points[ii*c+l];
}
}
}
}
// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample), pts_cnt (b,m)
__global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt)
{
int batch_index = blockIdx.x;
xyz1 += n*3*batch_index;
xyz2 += m*3*batch_index;
idx += m*nsample*batch_index;
pts_cnt += m*batch_index; // counting how many unique points selected in local region
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius) {
if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
pts_cnt[j] = cnt;
}
}
__global__ void farthestpointsamplingKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs)
{
if (m<=0)
return;
const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
const int BufferSize=3072;
__shared__ float buf[BufferSize*3];
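// cache the first BufferSize points of this batch element in shared memory;
// points beyond that index fall back to global-memory loads in the loop below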
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int old=0;
if (threadIdx.x==0)
idxs[i*m+0]=old;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){
buf[j]=dataset[i*n*3+j];
}
__syncthreads();
for (int j=1;j<m;j++){
int besti=0;
float best=-1;
float x1=dataset[i*n*3+old*3+0];
float y1=dataset[i*n*3+old*3+1];
float z1=dataset[i*n*3+old*3+2];
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[blockIdx.x*n+k];
float x2,y2,z2;
if (k<BufferSize){
x2=buf[k*3+0];
y2=buf[k*3+1];
z2=buf[k*3+2];
}else{
x2=dataset[i*n*3+k*3+0];
y2=dataset[i*n*3+k*3+1];
z2=dataset[i*n*3+k*3+2];
}
float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
float d2=min(d,td);
if (d2!=td)
temp[blockIdx.x*n+k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
if (threadIdx.x==0)
idxs[i*m+j]=old;
}
}
}
__global__ void threenn(int b, int n, int m, const float *xyz1, const float *xyz2, float *dist, int *idx)
{
for (int i=0;i<b;++i) {
for (int j=0;j<n;++j) {
float x1=xyz1[j*3+0];
float y1=xyz1[j*3+1];
float z1=xyz1[j*3+2];
double best1=1e40; double best2=1e40; double best3=1e40;
int besti1=0; int besti2=0; int besti3=0;
for (int k=0;k<m;++k) {
float x2=xyz2[k*3+0];
float y2=xyz2[k*3+1];
float z2=xyz2[k*3+2];
//float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
double d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
if (d<best1) {
best3=best2;
besti3=besti2;
best2=best1;
besti2=besti1;
best1=d;
besti1=k;
} else if (d<best2) {
best3=best2;
besti3=besti2;
best2=d;
besti2=k;
} else if (d<best3) {
best3=d;
besti3=k;
}
}
dist[j*3]=best1;
idx[j*3]=besti1;
dist[j*3+1]=best2;
idx[j*3+1]=besti2;
dist[j*3+2]=best3;
idx[j*3+2]=besti3;
}
xyz1+=n*3;
xyz2+=m*3;
dist+=n*3;
idx+=n*3;
}
}
// input: points (b,m,c), idx (b,n,3), weight (b,n,3)
// output: out (b,n,c)
__global__ void interpolategp(int b, int m, int c, int n, const float *points, const int *idx, const float *weight, float *out)
{
float w1,w2,w3;
int i1,i2,i3;
for (int i=0;i<b;++i) {
for (int j=0;j<n;++j) {
w1=weight[j*3];
w2=weight[j*3+1];
w3=weight[j*3+2];
i1=idx[j*3];
i2=idx[j*3+1];
i3=idx[j*3+2];
for (int l=0;l<c;++l) {
out[j*c+l] = points[i1*c+l]*w1 + points[i2*c+l]*w2 + points[i3*c+l]*w3;
}
}
points+=m*c;
idx+=n*3;
weight+=n*3;
out+=n*c;
}
}
void cubeSelectLauncher(int b, int n, float radius, const float* xyz, int* idx_out) {
hipLaunchKernelGGL(( cubeselect), dim3(b), dim3(512), 0, 0, n, radius, xyz, idx_out);
}
void group_pointsLauncher(int b, int n, int c, int m, int nsamples, const float * pointsp, const int * idxp, float * outp){
hipLaunchKernelGGL(( group_point_gpu), dim3(b),dim3(256), 0, 0, b,n,c,m,nsamples,pointsp,idxp,outp);
}
void queryBallPointLauncher(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
hipLaunchKernelGGL(( query_ball_point_gpu), dim3(b),dim3(256), 0, 0, b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt);
//hipDeviceSynchronize();
}
void farthestpointsamplingLauncher(int b,int n,int m,const float * inp,float * temp,int * out){
hipLaunchKernelGGL(( farthestpointsamplingKernel), dim3(32),dim3(512), 0, 0, b,n,m,inp,temp,out);
}
void threennLauncher(int b, int n, int m, const float *xyz1, const float *xyz2, float *dist, int *idx){
hipLaunchKernelGGL(( threenn), dim3(b),dim3(512), 0, 0, b,n,m,xyz1,xyz2,dist,idx);
}
void interpolateLauncher(int b, int m, int c, int n, const float *points, const int *idx, const float *weight, float *out){
hipLaunchKernelGGL(( interpolategp), dim3(b),dim3(512), 0, 0, b,m,c,n,points,idx,weight,out);
}
| 5d1932ed39044e2e0be771a38bc9cad475c73712.cu | #include "../include/PointSift_cuda.h"
#include <stdio.h>
__global__ void cubeselect(int n,float radius, const float* xyz, int* idx_out)
{
int batch_idx = blockIdx.x;
xyz += batch_idx * n * 3;
idx_out += batch_idx * n * 8;
float temp_dist[8];
float judge_dist = radius * radius;
for(int i = threadIdx.x; i < n;i += blockDim.x) {
float x = xyz[i * 3];
float y = xyz[i * 3 + 1];
float z = xyz[i * 3 + 2];
for(int j = 0;j < 8;j ++) {
temp_dist[j] = 1e8;
idx_out[i * 8 + j] = i; // if not found, just return itself..
}
for(int j = 0;j < n;j ++) {
if(i != j){
float tx = xyz[j * 3];
float ty = xyz[j * 3 + 1];
float tz = xyz[j * 3 + 2];
float dist = (x - tx) * (x - tx) + (y - ty) * (y - ty) + (z - tz) * (z - tz);
if(dist <= judge_dist){
int _x = (tx > x);
int _y = (ty > y);
int _z = (tz > z);
int temp_idx = _x * 4 + _y * 2 + _z;
if(dist < temp_dist[temp_idx]) {
idx_out[i * 8 + temp_idx] = j;
temp_dist[temp_idx] = dist;
}
}
}
}
}
}
// input: points (b,n,c), idx (b,m,nsample)
// output: out (b,m,nsample,c)
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out)
{
int batch_index = blockIdx.x;
points += n*c*batch_index;
idx += m*nsample*batch_index;
out += m*nsample*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
out[j*nsample*c+k*c+l] = points[ii*c+l];
}
}
}
}
// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample), pts_cnt (b,m)
__global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt)
{
int batch_index = blockIdx.x;
xyz1 += n*3*batch_index;
xyz2 += m*3*batch_index;
idx += m*nsample*batch_index;
pts_cnt += m*batch_index; // counting how many unique points selected in local region
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius) {
if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
pts_cnt[j] = cnt;
}
}
__global__ void farthestpointsamplingKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs)
{
if (m<=0)
return;
const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
const int BufferSize=3072;
__shared__ float buf[BufferSize*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int old=0;
if (threadIdx.x==0)
idxs[i*m+0]=old;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){
buf[j]=dataset[i*n*3+j];
}
__syncthreads();
for (int j=1;j<m;j++){
int besti=0;
float best=-1;
float x1=dataset[i*n*3+old*3+0];
float y1=dataset[i*n*3+old*3+1];
float z1=dataset[i*n*3+old*3+2];
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[blockIdx.x*n+k];
float x2,y2,z2;
if (k<BufferSize){
x2=buf[k*3+0];
y2=buf[k*3+1];
z2=buf[k*3+2];
}else{
x2=dataset[i*n*3+k*3+0];
y2=dataset[i*n*3+k*3+1];
z2=dataset[i*n*3+k*3+2];
}
float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
float d2=min(d,td);
if (d2!=td)
temp[blockIdx.x*n+k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
if (threadIdx.x==0)
idxs[i*m+j]=old;
}
}
}
__global__ void threenn(int b, int n, int m, const float *xyz1, const float *xyz2, float *dist, int *idx)
{
for (int i=0;i<b;++i) {
for (int j=0;j<n;++j) {
float x1=xyz1[j*3+0];
float y1=xyz1[j*3+1];
float z1=xyz1[j*3+2];
double best1=1e40; double best2=1e40; double best3=1e40;
int besti1=0; int besti2=0; int besti3=0;
for (int k=0;k<m;++k) {
float x2=xyz2[k*3+0];
float y2=xyz2[k*3+1];
float z2=xyz2[k*3+2];
//float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
double d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
if (d<best1) {
best3=best2;
besti3=besti2;
best2=best1;
besti2=besti1;
best1=d;
besti1=k;
} else if (d<best2) {
best3=best2;
besti3=besti2;
best2=d;
besti2=k;
} else if (d<best3) {
best3=d;
besti3=k;
}
}
dist[j*3]=best1;
idx[j*3]=besti1;
dist[j*3+1]=best2;
idx[j*3+1]=besti2;
dist[j*3+2]=best3;
idx[j*3+2]=besti3;
}
xyz1+=n*3;
xyz2+=m*3;
dist+=n*3;
idx+=n*3;
}
}
// input: points (b,m,c), idx (b,n,3), weight (b,n,3)
// output: out (b,n,c)
__global__ void interpolategp(int b, int m, int c, int n, const float *points, const int *idx, const float *weight, float *out)
{
float w1,w2,w3;
int i1,i2,i3;
for (int i=0;i<b;++i) {
for (int j=0;j<n;++j) {
w1=weight[j*3];
w2=weight[j*3+1];
w3=weight[j*3+2];
i1=idx[j*3];
i2=idx[j*3+1];
i3=idx[j*3+2];
for (int l=0;l<c;++l) {
out[j*c+l] = points[i1*c+l]*w1 + points[i2*c+l]*w2 + points[i3*c+l]*w3;
}
}
points+=m*c;
idx+=n*3;
weight+=n*3;
out+=n*c;
}
}
void cubeSelectLauncher(int b, int n, float radius, const float* xyz, int* idx_out) {
cubeselect<<<b, 512>>>(n, radius, xyz, idx_out);
}
void group_pointsLauncher(int b, int n, int c, int m, int nsamples, const float * pointsp, const int * idxp, float * outp){
group_point_gpu<<<b,256>>>(b,n,c,m,nsamples,pointsp,idxp,outp);
}
void queryBallPointLauncher(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
query_ball_point_gpu<<<b,256>>>(b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt);
//cudaDeviceSynchronize();
}
void farthestpointsamplingLauncher(int b,int n,int m,const float * inp,float * temp,int * out){
farthestpointsamplingKernel<<<32,512>>>(b,n,m,inp,temp,out);
}
void threennLauncher(int b, int n, int m, const float *xyz1, const float *xyz2, float *dist, int *idx){
threenn<<<b,512>>>(b,n,m,xyz1,xyz2,dist,idx);
}
void interpolateLauncher(int b, int m, int c, int n, const float *points, const int *idx, const float *weight, float *out){
interpolategp<<<b,512>>>(b,m,c,n,points,idx,weight,out);
}
|
5f374dee7384c515d4c6411844832f082859eebd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mainwindow.h"
#include "qcustomplot.h"
#include "ui_mainwindow.h"
__global__ void TestDouble(double *arr, float *device_double_time, int size) {
int xid = blockIdx.x * blockDim.x + threadIdx.x;
clock_t start = clock();
if (xid < size) {
for (int i = 0; i < 256; i++)
arr[xid] = arr[xid] * arr[xid] + arr[xid];
}
clock_t end = clock();
device_double_time[xid] = (float)(end - start);
}
| 5f374dee7384c515d4c6411844832f082859eebd.cu | #include "mainwindow.h"
#include "qcustomplot.h"
#include "ui_mainwindow.h"
__global__ void TestDouble(double *arr, float *device_double_time, int size) {
int xid = blockIdx.x * blockDim.x + threadIdx.x;
clock_t start = clock();
if (xid < size) {
for (int i = 0; i < 256; i++)
arr[xid] = arr[xid] * arr[xid] + arr[xid];
}
clock_t end = clock();
device_double_time[xid] = (float)(end - start);
}
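// --- Hypothetical usage sketch (not part of the original source) ---
// TestDouble records raw clock() cycles per thread; converting them to time needs
// the SM clock rate. The buffer sizes and launch shape below are assumptions for
// illustration only (d_time must hold blocks*threads floats, since the kernel
// writes it for every launched thread).
//
//   int size = 1 << 20, threads = 256, blocks = size / threads;
//   TestDouble<<<blocks, threads>>>(d_arr, d_time, size);
//   cudaDeviceSynchronize();
//   cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0);
//   // prop.clockRate is in kHz, so cycles / clockRate gives time in milliseconds:
//   // time_ms[i] = host_cycles[i] / (float)prop.clockRate;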
|
889a543701b71964a7283efe86068d6ab3fc4fbd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define PRECISION_z
__global__ void
zparilut_candidates_count_1(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* U_new_row)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
int numaddrowL = 0;
int ilu0 = L0_row[row];
int ilut = L_row[row];
int endilu0 = L0_row[ row+1 ];
int endilut = L_row[ row+1 ];
int ilu0col;
int ilutcol;
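// two-pointer sweep over the ILU(0) row and the current L row: every ILU(0)
// entry that is missing from the current sparsity pattern is counted as a candidate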
do{
ilu0col = L0_col[ ilu0 ];
ilutcol = L_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
numaddrowL++;
ilu0++;
}
} while (ilut < endilut && ilu0 < endilu0);
// do the rest if existing
if(ilu0<endilu0 ){
do{
numaddrowL++;
ilu0++;
}while(ilu0<endilu0 );
}
L_new_row[ row ] = L_new_row[ row ]+numaddrowL;
magma_int_t numaddrowU = 0;
ilu0 = U0_row[row];
ilut = U_row[row];
endilu0 = U0_row[ row+1 ];
endilut = U_row[ row+1 ];
do{
ilu0col = U0_col[ ilu0 ];
ilutcol = U_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
numaddrowU++;
ilu0++;
}
}while(ilut<endilut && ilu0<endilu0 );
if(ilu0<endilu0 ){
do{
numaddrowU++;
ilu0++;
}while(ilu0<endilu0 );
}
U_new_row[ row ] = U_new_row[ row ]+numaddrowU;
}
}
__global__ void
zparilut_candidates_count_2(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* U_new_row)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
// how to determine candidates:
// for each node i, look at any "intermediate" neighbor nodes numbered
// less, and then see if this neighbor has another neighbor j numbered
// more than the intermediate; if so, the fill-in is (i,j), provided that entry is not
// already nonzero
int numaddrowL = 0, numaddrowU = 0;
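// symbolic fill of L*U: whenever L(row,col1) and U(col1,col2) are both nonzero,
// position (row,col2) can carry a nonzero ILU residual and is counted as a candidate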
// loop first element over row - only for elements smaller the diagonal
for(int el1=L_row[row]; el1<L_row[row+1]-1; el1++ ){
int col1 = L_col[ el1 ];
// now check the upper triangular
// second loop first element over row - only for elements larger the intermediate
for(int el2 = U_row[ col1 ]+1; el2 < U_row[ col1+1 ]; el2++ ){
int col2 = U_col[ el2 ];
int cand_col = col2;
// check whether this element already exists
// first case: part of L
if(cand_col < row ){
// check whether this element already exists in L
// int exist = 0;
// for(int k=L_row[cand_row]; k<L_row[cand_row+1]; k++ ){
// if(L_col[ k ] == cand_col ){
// exist = 1;
// //break;
// }
// }
// if it does not exist, increase counter for this location
// use the entry one further down to allow for parallel insertion
// if(exist == 0 ){
numaddrowL++;
// }
} else {
// check whether this element already exists in U
// int exist = 0;
// for(int k=U_row[cand_row]; k<U_row[cand_row+1]; k++ ){
// if(U_col[ k ] == cand_col ){
// exist = 1;
// //break;
// }
// }
// if(exist == 0 ){
//printf("checked row: %d this element does not yet exist in L: (%d,%d)\n", cand_row, cand_col);
numaddrowU++;
// }
}
}
}
U_new_row[ row ] = U_new_row[ row ]+numaddrowU;
L_new_row[ row ] = L_new_row[ row ]+numaddrowL;
}
}
__global__ void
zparilut_candidates_insert_1(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* L_new_rowidx,
magma_index_t* L_new_col,
magmaDoubleComplex* L_new_val,
magma_index_t* insertedL,
magma_index_t* U_new_row,
magma_index_t* U_new_rowidx,
magma_index_t* U_new_col,
magmaDoubleComplex* U_new_val,
magma_index_t* insertedU)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
int laddL = 0;
int offsetL = L_new_row[row];
int ilu0 = L0_row[row];
int ilut = L_row[row];
int endilu0 = L0_row[ row+1 ];
int endilut = L_row[ row+1 ];
int ilu0col;
int ilutcol;
do{
ilu0col = L0_col[ ilu0 ];
ilutcol = L_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
L_new_col[ offsetL + laddL ] = ilu0col;
L_new_rowidx[ offsetL + laddL ] = row;
L_new_val[ offsetL + laddL ] = MAGMA_Z_ONE + MAGMA_Z_ONE + MAGMA_Z_ONE;
laddL++;
ilu0++;
}
} while(ilut<endilut && ilu0<endilu0 );
if (ilu0<endilu0){
do{
ilu0col = L0_col[ ilu0 ];
L_new_col[ offsetL + laddL ] = ilu0col;
L_new_rowidx[ offsetL + laddL ] = row;
L_new_val[ offsetL + laddL ] = MAGMA_Z_ONE + MAGMA_Z_ONE + MAGMA_Z_ONE;
laddL++;
ilu0++;
}while(ilu0<endilu0 );
}
insertedL[row] = laddL;
int laddU = 0;
int offsetU = U_new_row[row];
ilu0 = U0_row[row];
ilut = U_row[row];
endilu0 = U0_row[ row+1 ];
endilut = U_row[ row+1 ];
do{
ilu0col = U0_col[ ilu0 ];
ilutcol = U_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
U_new_col[ offsetU + laddU ] = ilu0col;
U_new_rowidx[ offsetU + laddU ] = row;
U_new_val[ offsetU + laddU ] = MAGMA_Z_ONE + MAGMA_Z_ONE + MAGMA_Z_ONE;
laddU++;
ilu0++;
}
}while(ilut<endilut && ilu0<endilu0 );
if(ilu0<endilu0 ){
do{
ilu0col = U0_col[ ilu0 ];
U_new_col[ offsetU + laddU ] = ilu0col;
U_new_rowidx[ offsetU + laddU ] = row;
U_new_val[ offsetU + laddU ] = MAGMA_Z_ONE + MAGMA_Z_ONE + MAGMA_Z_ONE;
laddU++;
ilu0++;
}while(ilu0<endilu0 );
}
insertedU[row] = laddU;
}
}
__global__ void
zparilut_candidates_insert_2(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* L_new_rowidx,
magma_index_t* L_new_col,
magmaDoubleComplex* L_new_val,
magma_index_t* insertedL,
magma_index_t* U_new_row,
magma_index_t* U_new_rowidx,
magma_index_t* U_new_col,
magmaDoubleComplex* U_new_val,
magma_index_t* insertedU)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
int cand_row = row;
int laddL = 0;
int laddU = 0;
int offsetL = L_new_row[row] + insertedL[row];
int offsetU = U_new_row[row] + insertedU[row];
// loop first element over row - only for elements smaller the diagonal
for(int el1=L_row[row]; el1<L_row[row+1]-1; el1++ ){
int col1 = L_col[ el1 ];
// now check the upper triangular
// second loop first element over row - only for elements larger the intermediate
for(int el2 = U_row[ col1 ]+1; el2 < U_row[ col1+1 ]; el2++ ){
int col2 = U_col[ el2 ];
int cand_col = col2;
// check whether this element already exists
// first case: part of L
if(cand_col < row ){
int exist = 0;
for(int k=L_row[cand_row]; k<L_row[cand_row+1]; k++ ){
if(L_col[ k ] == cand_col ){
exist = -1;
// printf("already exists:(%d,%d\n", row, cand_col);
//break;
}
}
for(int k=L_new_row[cand_row]; k<L_new_row[cand_row+1]; k++){
if(L_new_col[ k ] == cand_col ){
// element included in LU and nonzero
// printf("already inserted:(%d,%d\n", row, cand_col);
exist = -2;
//break;
}
}
L_new_rowidx[ offsetL + laddL ] = cand_row;
L_new_col[ offsetL + laddL ] = (exist == 0) ? cand_col : exist;
L_new_val[ offsetL + laddL ] = (exist == 0) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
laddL++;
} else {
// check whether this element already exists in U
int exist = 0;
for(int k=U_row[cand_row]; k<U_row[cand_row+1]; k++ ){
if(U_col[ k ] == cand_col ){
// printf("already exists:(%d,%d\n", row, cand_col);
exist = -1;
//break;
}
}
for(int k=U_new_row[cand_row]; k<U_new_row[cand_row+1]; k++){
if(U_new_col[ k ] == cand_col ){
// element included in LU and nonzero
// printf("already inserted:(%d,%d==%d) k:%d -> %d -> %d\n", row, cand_col , U_new_col[ k ], U_new_row[cand_row], k, U_new_row[cand_row+1] );
exist = -2;
//break;
}
}
U_new_rowidx[ offsetU + laddU ] = cand_row;
U_new_col[ offsetU + laddU ] = (exist == 0) ? cand_col : exist;
U_new_val[ offsetU + laddU ] = (exist == 0) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
laddU++;
}
}
}
}
}
/***************************************************************************//**
Purpose
-------
This function identifies the locations with a potential nonzero ILU residual
R = A - L*U where L and U are the current incomplete factors.
Nonzero ILU residuals are possible
1 where A is nonzero but L and U have no nonzero entry
2 where the product L*U has fill-in but the location is not included
in L or U
We assume that the incomplete factors are exact for the elements included in
the current pattern.
This is the GPU implementation of the candidate search.
Several GPU kernels are used: the first is a dry run assessing the memory need,
the second then computes the candidate locations, the third eliminates
double entries. The fourth kernel ensures the elements in a row are sorted
for increasing column index.
Arguments
---------
@param[in]
L0 magma_z_matrix
tril(ILU(0) ) pattern of original system matrix.
@param[in]
U0 magma_z_matrix
triu(ILU(0) ) pattern of original system matrix.
@param[in]
L magma_z_matrix
Current lower triangular factor.
@param[in]
U magma_z_matrix
Current upper triangular factor.
@param[in,out]
L_new magma_z_matrix*
List of candidates for L in COO format.
@param[in,out]
U_new magma_z_matrix*
List of candidates for U in COO format.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
*******************************************************************************/
extern "C" magma_int_t
magma_zparilut_candidates_gpu(
magma_z_matrix L0,
magma_z_matrix U0,
magma_z_matrix L,
magma_z_matrix U,
magma_z_matrix *L_new,
magma_z_matrix *U_new,
magma_queue_t queue )
{
magma_int_t info = 0;
int num_rows = L.num_rows;
double thrs = 1e-8;
int blocksize1 = 128;
int blocksize2 = 1;
int dimgrid11 = magma_ceildiv(num_rows, blocksize1 );
int dimgrid12 = 1;
int dimgrid13 = 1;
dim3 grid1(dimgrid11, dimgrid12, dimgrid13 );
dim3 block1(blocksize1, blocksize2, 1 );
magmaIndex_ptr insertedL = NULL;
magmaIndex_ptr insertedU = NULL;
magma_zmfree(L_new, queue);
magma_zmfree(U_new, queue);
CHECK(magma_index_malloc(&insertedL, num_rows));
CHECK(magma_index_malloc(&insertedU, num_rows));
CHECK(magma_index_malloc(&L_new->drow, num_rows+1));
CHECK(magma_index_malloc(&U_new->drow, num_rows+1));
CHECK(magma_zindexinit_gpu(num_rows+1, L_new->drow, queue));
CHECK(magma_zindexinit_gpu(num_rows+1, U_new->drow, queue));
CHECK(magma_zindexinit_gpu(num_rows, insertedL, queue));
CHECK(magma_zindexinit_gpu(num_rows, insertedU, queue));
L_new->num_rows = L.num_rows;
L_new->num_cols = L.num_cols;
L_new->storage_type = Magma_CSR;
L_new->memory_location = Magma_DEV;
U_new->num_rows = L.num_rows;
U_new->num_cols = L.num_cols;
U_new->storage_type = Magma_CSR;
U_new->memory_location = Magma_DEV;
hipLaunchKernelGGL(( zparilut_candidates_count_1), dim3(grid1), dim3(block1), 0, queue->cuda_stream(),
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
insertedL, insertedU);
hipLaunchKernelGGL(( zparilut_candidates_count_2), dim3(grid1), dim3(block1), 0, queue->cuda_stream(),
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
insertedL, insertedU);
CHECK(magma_zget_row_ptr(num_rows, &L_new->nnz, insertedL,
L_new->drow, queue));
CHECK(magma_zget_row_ptr(num_rows, &U_new->nnz, insertedU,
U_new->drow, queue));
CHECK(magma_zindexinit_gpu(num_rows, insertedL, queue));
CHECK(magma_zindexinit_gpu(num_rows, insertedU, queue));
CHECK(magma_zmalloc(&L_new->dval, L_new->nnz));
CHECK(magma_index_malloc(&L_new->drowidx, L_new->nnz));
CHECK(magma_index_malloc(&L_new->dcol, L_new->nnz));
CHECK(magma_zmalloc(&U_new->dval, U_new->nnz));
CHECK(magma_index_malloc(&U_new->drowidx, U_new->nnz));
CHECK(magma_index_malloc(&U_new->dcol, U_new->nnz));
CHECK(magma_zvalinit_gpu(L_new->nnz, L_new->dval, queue));
CHECK(magma_zvalinit_gpu(U_new->nnz, U_new->dval, queue));
//CHECK(magma_zindexinit_gpu(L_new->nnz, L_new->dcol, queue));
//CHECK(magma_zindexinit_gpu(U_new->nnz, U_new->dcol, queue));
//CHECK(magma_zindexinit_gpu(L_new->nnz, L_new->drowidx, queue));
//CHECK(magma_zindexinit_gpu(U_new->nnz, U_new->drowidx, queue));
// we don't need to init rowidx and col
// the uninitialized values will be removed anyway
hipLaunchKernelGGL(( zparilut_candidates_insert_1), dim3(grid1), dim3(block1), 0, queue->cuda_stream(),
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
L_new->drow, L_new->drowidx, L_new->dcol, L_new->dval, insertedL,
U_new->drow, U_new->drowidx, U_new->dcol, U_new->dval, insertedU);
hipLaunchKernelGGL(( zparilut_candidates_insert_2), dim3(grid1), dim3(block1), 0, queue->cuda_stream(),
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
L_new->drow, L_new->drowidx, L_new->dcol, L_new->dval, insertedL,
U_new->drow, U_new->drowidx, U_new->dcol, U_new->dval, insertedU);
CHECK(magma_zthrsholdrm_gpu(1, L_new, &thrs, queue));
CHECK(magma_zthrsholdrm_gpu(1, U_new, &thrs, queue));
cleanup:
magma_free(insertedL);
magma_free(insertedU);
return info;
}
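// --- Hypothetical usage sketch (not part of the original source) ---
// Inside a ParILUT-style sweep the candidate search above would typically be
// called with the ILU(0) patterns and the current factors; the matrix names,
// the surrounding selection step and the error handling are assumptions here.
//
//   magma_z_matrix L_cand = {Magma_CSR}, U_cand = {Magma_CSR};
//   info = magma_zparilut_candidates_gpu(L0, U0, L, U, &L_cand, &U_cand, queue);
//   // ... rate the candidate entries, keep the largest ones, insert them into L/U ...
//   magma_zmfree(&L_cand, queue);
//   magma_zmfree(&U_cand, queue);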
| 889a543701b71964a7283efe86068d6ab3fc4fbd.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define PRECISION_z
__global__ void
zparilut_candidates_count_1(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* U_new_row)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
int numaddrowL = 0;
int ilu0 = L0_row[row];
int ilut = L_row[row];
int endilu0 = L0_row[ row+1 ];
int endilut = L_row[ row+1 ];
int ilu0col;
int ilutcol;
do{
ilu0col = L0_col[ ilu0 ];
ilutcol = L_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
numaddrowL++;
ilu0++;
}
} while (ilut < endilut && ilu0 < endilu0);
// do the rest if existing
if(ilu0<endilu0 ){
do{
numaddrowL++;
ilu0++;
}while(ilu0<endilu0 );
}
L_new_row[ row ] = L_new_row[ row ]+numaddrowL;
magma_int_t numaddrowU = 0;
ilu0 = U0_row[row];
ilut = U_row[row];
endilu0 = U0_row[ row+1 ];
endilut = U_row[ row+1 ];
do{
ilu0col = U0_col[ ilu0 ];
ilutcol = U_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
numaddrowU++;
ilu0++;
}
}while(ilut<endilut && ilu0<endilu0 );
if(ilu0<endilu0 ){
do{
numaddrowU++;
ilu0++;
}while(ilu0<endilu0 );
}
U_new_row[ row ] = U_new_row[ row ]+numaddrowU;
}
}
__global__ void
zparilut_candidates_count_2(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* U_new_row)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
// how to determine candidates:
// for each node i, look at any "intermediate" neighbor nodes numbered
// less, and then see if this neighbor has another neighbor j numbered
// more than the intermediate; if so, the fill-in is (i,j), provided that entry is not
// already nonzero
int numaddrowL = 0, numaddrowU = 0;
// loop first element over row - only for elements smaller the diagonal
for(int el1=L_row[row]; el1<L_row[row+1]-1; el1++ ){
int col1 = L_col[ el1 ];
// now check the upper triangular
// second loop first element over row - only for elements larger the intermediate
for(int el2 = U_row[ col1 ]+1; el2 < U_row[ col1+1 ]; el2++ ){
int col2 = U_col[ el2 ];
int cand_col = col2;
// check whether this element already exists
// first case: part of L
if(cand_col < row ){
// check whether this element already exists in L
// int exist = 0;
// for(int k=L_row[cand_row]; k<L_row[cand_row+1]; k++ ){
// if(L_col[ k ] == cand_col ){
// exist = 1;
// //break;
// }
// }
// if it does not exist, increase counter for this location
// use the entry one further down to allow for parallel insertion
// if(exist == 0 ){
numaddrowL++;
// }
} else {
// check whether this element already exists in U
// int exist = 0;
// for(int k=U_row[cand_row]; k<U_row[cand_row+1]; k++ ){
// if(U_col[ k ] == cand_col ){
// exist = 1;
// //break;
// }
// }
// if(exist == 0 ){
//printf("checked row: %d this element does not yet exist in L: (%d,%d)\n", cand_row, cand_col);
numaddrowU++;
// }
}
}
}
U_new_row[ row ] = U_new_row[ row ]+numaddrowU;
L_new_row[ row ] = L_new_row[ row ]+numaddrowL;
}
}
__global__ void
zparilut_candidates_insert_1(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* L_new_rowidx,
magma_index_t* L_new_col,
magmaDoubleComplex* L_new_val,
magma_index_t* insertedL,
magma_index_t* U_new_row,
magma_index_t* U_new_rowidx,
magma_index_t* U_new_col,
magmaDoubleComplex* U_new_val,
magma_index_t* insertedU)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
int laddL = 0;
int offsetL = L_new_row[row];
int ilu0 = L0_row[row];
int ilut = L_row[row];
int endilu0 = L0_row[ row+1 ];
int endilut = L_row[ row+1 ];
int ilu0col;
int ilutcol;
do{
ilu0col = L0_col[ ilu0 ];
ilutcol = L_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
L_new_col[ offsetL + laddL ] = ilu0col;
L_new_rowidx[ offsetL + laddL ] = row;
L_new_val[ offsetL + laddL ] = MAGMA_Z_ONE + MAGMA_Z_ONE + MAGMA_Z_ONE;
laddL++;
ilu0++;
}
} while(ilut<endilut && ilu0<endilu0 );
if (ilu0<endilu0){
do{
ilu0col = L0_col[ ilu0 ];
L_new_col[ offsetL + laddL ] = ilu0col;
L_new_rowidx[ offsetL + laddL ] = row;
L_new_val[ offsetL + laddL ] = MAGMA_Z_ONE + MAGMA_Z_ONE + MAGMA_Z_ONE;
laddL++;
ilu0++;
}while(ilu0<endilu0 );
}
insertedL[row] = laddL;
int laddU = 0;
int offsetU = U_new_row[row];
ilu0 = U0_row[row];
ilut = U_row[row];
endilu0 = U0_row[ row+1 ];
endilut = U_row[ row+1 ];
do{
ilu0col = U0_col[ ilu0 ];
ilutcol = U_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
U_new_col[ offsetU + laddU ] = ilu0col;
U_new_rowidx[ offsetU + laddU ] = row;
U_new_val[ offsetU + laddU ] = MAGMA_Z_ONE + MAGMA_Z_ONE + MAGMA_Z_ONE;
laddU++;
ilu0++;
}
}while(ilut<endilut && ilu0<endilu0 );
if(ilu0<endilu0 ){
do{
ilu0col = U0_col[ ilu0 ];
U_new_col[ offsetU + laddU ] = ilu0col;
U_new_rowidx[ offsetU + laddU ] = row;
U_new_val[ offsetU + laddU ] = MAGMA_Z_ONE + MAGMA_Z_ONE + MAGMA_Z_ONE;
laddU++;
ilu0++;
}while(ilu0<endilu0 );
}
insertedU[row] = laddU;
}
}
__global__ void
zparilut_candidates_insert_2(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* L_new_rowidx,
magma_index_t* L_new_col,
magmaDoubleComplex* L_new_val,
magma_index_t* insertedL,
magma_index_t* U_new_row,
magma_index_t* U_new_rowidx,
magma_index_t* U_new_col,
magmaDoubleComplex* U_new_val,
magma_index_t* insertedU)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
int cand_row = row;
int laddL = 0;
int laddU = 0;
int offsetL = L_new_row[row] + insertedL[row];
int offsetU = U_new_row[row] + insertedU[row];
// loop first element over row - only for elements smaller the diagonal
for(int el1=L_row[row]; el1<L_row[row+1]-1; el1++ ){
int col1 = L_col[ el1 ];
// now check the upper triangular
// second loop first element over row - only for elements larger the intermediate
for(int el2 = U_row[ col1 ]+1; el2 < U_row[ col1+1 ]; el2++ ){
int col2 = U_col[ el2 ];
int cand_col = col2;
// check whether this element already exists
// first case: part of L
if(cand_col < row ){
int exist = 0;
for(int k=L_row[cand_row]; k<L_row[cand_row+1]; k++ ){
if(L_col[ k ] == cand_col ){
exist = -1;
// printf("already exists:(%d,%d\n", row, cand_col);
//break;
}
}
for(int k=L_new_row[cand_row]; k<L_new_row[cand_row+1]; k++){
if(L_new_col[ k ] == cand_col ){
// element included in LU and nonzero
// printf("already inserted:(%d,%d\n", row, cand_col);
exist = -2;
//break;
}
}
L_new_rowidx[ offsetL + laddL ] = cand_row;
L_new_col[ offsetL + laddL ] = (exist == 0) ? cand_col : exist;
L_new_val[ offsetL + laddL ] = (exist == 0) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
laddL++;
} else {
// check whether this element already exists in U
int exist = 0;
for(int k=U_row[cand_row]; k<U_row[cand_row+1]; k++ ){
if(U_col[ k ] == cand_col ){
// printf("already exists:(%d,%d\n", row, cand_col);
exist = -1;
//break;
}
}
for(int k=U_new_row[cand_row]; k<U_new_row[cand_row+1]; k++){
if(U_new_col[ k ] == cand_col ){
// element included in LU and nonzero
// printf("already inserted:(%d,%d==%d) k:%d -> %d -> %d\n", row, cand_col , U_new_col[ k ], U_new_row[cand_row], k, U_new_row[cand_row+1] );
exist = -2;
//break;
}
}
U_new_rowidx[ offsetU + laddU ] = cand_row;
U_new_col[ offsetU + laddU ] = (exist == 0) ? cand_col : exist;
U_new_val[ offsetU + laddU ] = (exist == 0) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
laddU++;
}
}
}
}
}
/***************************************************************************//**
Purpose
-------
This function identifies the locations with a potential nonzero ILU residual
R = A - L*U where L and U are the current incomplete factors.
Nonzero ILU residuals are possible
1 where A is nonzero but L and U have no nonzero entry
2 where the product L*U has fill-in but the location is not included
in L or U
We assume that the incomplete factors are exact for the elements included in
the current pattern.
This is the GPU implementation of the candidate search.
Several GPU kernels are used: the first is a dry run assessing the memory need,
the second then computes the candidate locations, the third eliminates
double entries. The fourth kernel ensures the elements in a row are sorted
for increasing column index.
Arguments
---------
@param[in]
L0 magma_z_matrix
tril(ILU(0) ) pattern of original system matrix.
@param[in]
U0 magma_z_matrix
triu(ILU(0) ) pattern of original system matrix.
@param[in]
L magma_z_matrix
Current lower triangular factor.
@param[in]
U magma_z_matrix
Current upper triangular factor.
@param[in,out]
L_new magma_z_matrix*
List of candidates for L in COO format.
@param[in,out]
U_new magma_z_matrix*
List of candidates for U in COO format.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
*******************************************************************************/
extern "C" magma_int_t
magma_zparilut_candidates_gpu(
magma_z_matrix L0,
magma_z_matrix U0,
magma_z_matrix L,
magma_z_matrix U,
magma_z_matrix *L_new,
magma_z_matrix *U_new,
magma_queue_t queue )
{
magma_int_t info = 0;
int num_rows = L.num_rows;
double thrs = 1e-8;
int blocksize1 = 128;
int blocksize2 = 1;
int dimgrid11 = magma_ceildiv(num_rows, blocksize1 );
int dimgrid12 = 1;
int dimgrid13 = 1;
dim3 grid1(dimgrid11, dimgrid12, dimgrid13 );
dim3 block1(blocksize1, blocksize2, 1 );
magmaIndex_ptr insertedL = NULL;
magmaIndex_ptr insertedU = NULL;
magma_zmfree(L_new, queue);
magma_zmfree(U_new, queue);
CHECK(magma_index_malloc(&insertedL, num_rows));
CHECK(magma_index_malloc(&insertedU, num_rows));
CHECK(magma_index_malloc(&L_new->drow, num_rows+1));
CHECK(magma_index_malloc(&U_new->drow, num_rows+1));
CHECK(magma_zindexinit_gpu(num_rows+1, L_new->drow, queue));
CHECK(magma_zindexinit_gpu(num_rows+1, U_new->drow, queue));
CHECK(magma_zindexinit_gpu(num_rows, insertedL, queue));
CHECK(magma_zindexinit_gpu(num_rows, insertedU, queue));
L_new->num_rows = L.num_rows;
L_new->num_cols = L.num_cols;
L_new->storage_type = Magma_CSR;
L_new->memory_location = Magma_DEV;
U_new->num_rows = L.num_rows;
U_new->num_cols = L.num_cols;
U_new->storage_type = Magma_CSR;
U_new->memory_location = Magma_DEV;
zparilut_candidates_count_1<<<grid1, block1, 0, queue->cuda_stream()>>>(
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
insertedL, insertedU);
zparilut_candidates_count_2<<<grid1, block1, 0, queue->cuda_stream()>>>(
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
insertedL, insertedU);
CHECK(magma_zget_row_ptr(num_rows, &L_new->nnz, insertedL,
L_new->drow, queue));
CHECK(magma_zget_row_ptr(num_rows, &U_new->nnz, insertedU,
U_new->drow, queue));
CHECK(magma_zindexinit_gpu(num_rows, insertedL, queue));
CHECK(magma_zindexinit_gpu(num_rows, insertedU, queue));
CHECK(magma_zmalloc(&L_new->dval, L_new->nnz));
CHECK(magma_index_malloc(&L_new->drowidx, L_new->nnz));
CHECK(magma_index_malloc(&L_new->dcol, L_new->nnz));
CHECK(magma_zmalloc(&U_new->dval, U_new->nnz));
CHECK(magma_index_malloc(&U_new->drowidx, U_new->nnz));
CHECK(magma_index_malloc(&U_new->dcol, U_new->nnz));
CHECK(magma_zvalinit_gpu(L_new->nnz, L_new->dval, queue));
CHECK(magma_zvalinit_gpu(U_new->nnz, U_new->dval, queue));
//CHECK(magma_zindexinit_gpu(L_new->nnz, L_new->dcol, queue));
//CHECK(magma_zindexinit_gpu(U_new->nnz, U_new->dcol, queue));
//CHECK(magma_zindexinit_gpu(L_new->nnz, L_new->drowidx, queue));
//CHECK(magma_zindexinit_gpu(U_new->nnz, U_new->drowidx, queue));
// we don't need to init rowidx and col
// the uninitialized values will be removed anyway
zparilut_candidates_insert_1<<<grid1, block1, 0, queue->cuda_stream()>>>(
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
L_new->drow, L_new->drowidx, L_new->dcol, L_new->dval, insertedL,
U_new->drow, U_new->drowidx, U_new->dcol, U_new->dval, insertedU);
zparilut_candidates_insert_2<<<grid1, block1, 0, queue->cuda_stream()>>>(
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
L_new->drow, L_new->drowidx, L_new->dcol, L_new->dval, insertedL,
U_new->drow, U_new->drowidx, U_new->dcol, U_new->dval, insertedU);
CHECK(magma_zthrsholdrm_gpu(1, L_new, &thrs, queue));
CHECK(magma_zthrsholdrm_gpu(1, U_new, &thrs, queue));
cleanup:
magma_free(insertedL);
magma_free(insertedU);
return info;
}
|
e009ede3802fde99d75e57b07b6e7e17dbd7cf0c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/kernel/new_kernel_util.h"
namespace oneflow {
namespace {
template<typename T, int arity>
struct Params {
const T* inputs[arity];
float weights[arity];
float alpha{};
T* output;
int64_t n;
};
template<typename T, int arity, bool acc>
__global__ void WeightedSumKernel(Params<T, arity> params) {
CUDA_1D_KERNEL_LOOP_T(int64_t, i, params.n) {
T out = 0;
if (acc) { out = params.output[i]; }
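    // out = alpha * ((acc ? previous output : 0) + sum_j weights[j] * inputs[j][i])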
#pragma unroll
for (int j = 0; j < arity; ++j) {
out += params.inputs[j][i] * static_cast<T>(params.weights[j]);
}
params.output[i] = out * static_cast<T>(params.alpha);
}
}
template<typename T, int arity, bool acc>
void LaunchWeightedSum(ep::Stream* stream, int n, const T** inputs, const float* weights,
float alpha, T* output) {
Params<T, arity> params{};
for (int i = 0; i < arity; ++i) {
params.inputs[i] = *(inputs + i);
params.weights[i] = *(weights + i);
}
params.alpha = alpha;
params.output = output;
params.n = n;
RUN_CUDA_KERNEL((WeightedSumKernel<T, arity, acc>), stream, n, params);
}
template<typename T, bool acc>
void DispatchWeightedSum(ep::Stream* stream, int arity, int64_t n, const T** inputs,
const float* weights, float alpha, T* output) {
if (arity == 1) {
LaunchWeightedSum<T, 1, acc>(stream, n, inputs, weights, alpha, output);
} else if (arity == 2) {
LaunchWeightedSum<T, 2, acc>(stream, n, inputs, weights, alpha, output);
} else if (arity == 3) {
LaunchWeightedSum<T, 3, acc>(stream, n, inputs, weights, alpha, output);
} else if (arity == 4) {
LaunchWeightedSum<T, 4, acc>(stream, n, inputs, weights, alpha, output);
} else if (arity == 5) {
LaunchWeightedSum<T, 5, acc>(stream, n, inputs, weights, alpha, output);
} else if (arity == 6) {
LaunchWeightedSum<T, 6, acc>(stream, n, inputs, weights, alpha, output);
} else if (arity == 7) {
LaunchWeightedSum<T, 7, acc>(stream, n, inputs, weights, alpha, output);
} else if (arity == 8) {
LaunchWeightedSum<T, 8, acc>(stream, n, inputs, weights, alpha, output);
} else if (arity > 8) {
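    // more than 8 inputs: fold the first 8 with scale 1.0, then recurse with acc=true so the
    // remaining inputs accumulate on top of the partial sum, applying alpha only once at the end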
LaunchWeightedSum<T, 8, acc>(stream, n, inputs, weights, 1.0F, output);
DispatchWeightedSum<T, true>(stream, arity - 8, n, inputs + 8, weights + 8, alpha, output);
} else {
UNIMPLEMENTED();
}
}
template<typename T>
class FusedWeightedSumKernel final : public user_op::OpKernel {
public:
FusedWeightedSumKernel() = default;
~FusedWeightedSumKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const int64_t arity = ctx->input_size("in");
CHECK_GE(arity, 1) << "input_size should be greater than 0.";
const std::vector<float>& weights = ctx->Attr<std::vector<float>>("weights");
CHECK_EQ(weights.size(), arity);
const float alpha = ctx->Attr<float>("alpha");
const DataType data_type = out->data_type();
const ShapeView& shape = out->shape_view();
std::vector<const T*> inputs(arity);
for (int i = 0; i < arity; ++i) {
const user_op::Tensor* in_i = ctx->Tensor4ArgNameAndIndex("in", i);
CHECK(in_i->shape_view() == shape);
CHECK_EQ(in_i->data_type(), data_type);
inputs[i] = in_i->dptr<T>();
}
DispatchWeightedSum<T, false>(ctx->stream(), arity, shape.elem_cnt(), inputs.data(),
weights.data(), alpha, out->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
} // namespace
#define REGISTER_FUSED_WEIGHT_SUM_KERNEL(data_type, cpp_type) \
REGISTER_USER_KERNEL("fused_weighted_sum") \
.SetCreateFn<FusedWeightedSumKernel<cpp_type>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("out", 0) == data_type))
REGISTER_FUSED_WEIGHT_SUM_KERNEL(DataType::kDouble, double);
REGISTER_FUSED_WEIGHT_SUM_KERNEL(DataType::kFloat, float);
REGISTER_FUSED_WEIGHT_SUM_KERNEL(DataType::kFloat16, half);
} // namespace oneflow
| e009ede3802fde99d75e57b07b6e7e17dbd7cf0c.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/kernel/new_kernel_util.h"
namespace oneflow {
namespace {
template<typename T, int arity>
struct Params {
const T* inputs[arity];
float weights[arity];
float alpha{};
T* output;
int64_t n;
};
template<typename T, int arity, bool acc>
__global__ void WeightedSumKernel(Params<T, arity> params) {
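  // Element-wise weighted sum over a grid-stride loop. When acc is true the
  // kernel starts from the value already in params.output (used when more than
  // 8 inputs are processed in chunks); alpha scales the accumulated sum.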
CUDA_1D_KERNEL_LOOP_T(int64_t, i, params.n) {
T out = 0;
if (acc) { out = params.output[i]; }
#pragma unroll
for (int j = 0; j < arity; ++j) {
out += params.inputs[j][i] * static_cast<T>(params.weights[j]);
}
params.output[i] = out * static_cast<T>(params.alpha);
}
}
template<typename T, int arity, bool acc>
void LaunchWeightedSum(ep::Stream* stream, int n, const T** inputs, const float* weights,
float alpha, T* output) {
Params<T, arity> params{};
for (int i = 0; i < arity; ++i) {
params.inputs[i] = *(inputs + i);
params.weights[i] = *(weights + i);
}
params.alpha = alpha;
params.output = output;
params.n = n;
RUN_CUDA_KERNEL((WeightedSumKernel<T, arity, acc>), stream, n, params);
}
template<typename T, bool acc>
void DispatchWeightedSum(ep::Stream* stream, int arity, int64_t n, const T** inputs,
const float* weights, float alpha, T* output) {
if (arity == 1) {
LaunchWeightedSum<T, 1, acc>(stream, n, inputs, weights, alpha, output);
} else if (arity == 2) {
LaunchWeightedSum<T, 2, acc>(stream, n, inputs, weights, alpha, output);
} else if (arity == 3) {
LaunchWeightedSum<T, 3, acc>(stream, n, inputs, weights, alpha, output);
} else if (arity == 4) {
LaunchWeightedSum<T, 4, acc>(stream, n, inputs, weights, alpha, output);
} else if (arity == 5) {
LaunchWeightedSum<T, 5, acc>(stream, n, inputs, weights, alpha, output);
} else if (arity == 6) {
LaunchWeightedSum<T, 6, acc>(stream, n, inputs, weights, alpha, output);
} else if (arity == 7) {
LaunchWeightedSum<T, 7, acc>(stream, n, inputs, weights, alpha, output);
} else if (arity == 8) {
LaunchWeightedSum<T, 8, acc>(stream, n, inputs, weights, alpha, output);
} else if (arity > 8) {
LaunchWeightedSum<T, 8, acc>(stream, n, inputs, weights, 1.0F, output);
DispatchWeightedSum<T, true>(stream, arity - 8, n, inputs + 8, weights + 8, alpha, output);
} else {
UNIMPLEMENTED();
}
}
template<typename T>
class FusedWeightedSumKernel final : public user_op::OpKernel {
public:
FusedWeightedSumKernel() = default;
~FusedWeightedSumKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const int64_t arity = ctx->input_size("in");
CHECK_GE(arity, 1) << "input_size should be greater than 0.";
const std::vector<float>& weights = ctx->Attr<std::vector<float>>("weights");
CHECK_EQ(weights.size(), arity);
const float alpha = ctx->Attr<float>("alpha");
const DataType data_type = out->data_type();
const ShapeView& shape = out->shape_view();
std::vector<const T*> inputs(arity);
for (int i = 0; i < arity; ++i) {
const user_op::Tensor* in_i = ctx->Tensor4ArgNameAndIndex("in", i);
CHECK(in_i->shape_view() == shape);
CHECK_EQ(in_i->data_type(), data_type);
inputs[i] = in_i->dptr<T>();
}
DispatchWeightedSum<T, false>(ctx->stream(), arity, shape.elem_cnt(), inputs.data(),
weights.data(), alpha, out->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
} // namespace
#define REGISTER_FUSED_WEIGHT_SUM_KERNEL(data_type, cpp_type) \
REGISTER_USER_KERNEL("fused_weighted_sum") \
.SetCreateFn<FusedWeightedSumKernel<cpp_type>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("out", 0) == data_type))
REGISTER_FUSED_WEIGHT_SUM_KERNEL(DataType::kDouble, double);
REGISTER_FUSED_WEIGHT_SUM_KERNEL(DataType::kFloat, float);
REGISTER_FUSED_WEIGHT_SUM_KERNEL(DataType::kFloat16, half);
} // namespace oneflow
|
79b83f057250266c89b2e7b477f3b5f38f76d925.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#define GRAPH_SIZE 2000
#define EDGE_COST(graph, graph_size, a, b) graph[a * graph_size + b]
#define D(a, b) EDGE_COST(output, graph_size, a, b)
#define INF 0x1fffffff
void generate_random_graph(int *output, int graph_size) {
int i, j;
srand(0xdadadada);
for (i = 0; i < graph_size; i++) {
for (j = 0; j < graph_size; j++) {
if (i == j) {
D(i, j) = 0;
} else {
int r;
r = rand() % 40;
if (r > 20) {
r = INF;
}
D(i, j) = r;
}
}
}
}
void floyd_warshall_gpu(const int *graph, int graph_size, int *output) {
// TODO
}
void floyd_warshall_cpu(const int *graph, int graph_size, int *output) {
int i, j, k;
memcpy(output, graph, sizeof(int) * graph_size * graph_size);
for (k = 0; k < graph_size; k++) {
for (i = 0; i < graph_size; i++) {
for (j = 0; j < graph_size; j++) {
if (D(i, k) + D(k, j) < D(i, j)) {
D(i, j) = D(i, k) + D(k, j);
}
}
}
}
}
int main(int argc, char **argv) {
#define TIMER_START() gettimeofday(&tv1, NULL)
#define TIMER_STOP() \
gettimeofday(&tv2, NULL); \
timersub(&tv2, &tv1, &tv); \
time_delta = (float)tv.tv_sec + tv.tv_usec / 1000000.0
struct timeval tv1, tv2, tv;
float time_delta;
int *graph, *output_cpu, *output_gpu;
int size;
size = sizeof(int) * GRAPH_SIZE * GRAPH_SIZE;
graph = (int *)malloc(size);
assert(graph);
output_cpu = (int *)malloc(size);
assert(output_cpu);
memset(output_cpu, 0, size);
output_gpu = (int *)malloc(size);
assert(output_gpu);
generate_random_graph(graph, GRAPH_SIZE);
fprintf(stderr, "running on cpu...\n");
TIMER_START();
floyd_warshall_cpu(graph, GRAPH_SIZE, output_cpu);
TIMER_STOP();
fprintf(stderr, "%f secs\n", time_delta);
fprintf(stderr, "running on gpu...\n");
TIMER_START();
floyd_warshall_gpu(graph, GRAPH_SIZE, output_gpu);
TIMER_STOP();
fprintf(stderr, "%f secs\n", time_delta);
if (memcmp(output_cpu, output_gpu, size) != 0) {
fprintf(stderr, "FAIL!\n");
}
return 0;
}
| 79b83f057250266c89b2e7b477f3b5f38f76d925.cu | #include <assert.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#define GRAPH_SIZE 2000
#define EDGE_COST(graph, graph_size, a, b) graph[a * graph_size + b]
#define D(a, b) EDGE_COST(output, graph_size, a, b)
#define INF 0x1fffffff
void generate_random_graph(int *output, int graph_size) {
int i, j;
srand(0xdadadada);
for (i = 0; i < graph_size; i++) {
for (j = 0; j < graph_size; j++) {
if (i == j) {
D(i, j) = 0;
} else {
int r;
r = rand() % 40;
if (r > 20) {
r = INF;
}
D(i, j) = r;
}
}
}
}
void floyd_warshall_gpu(const int *graph, int graph_size, int *output) {
  // TODO (one possible approach is sketched below in floyd_warshall_gpu_sketch)
}
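/*
 * A minimal sketch (not called anywhere in this program) of one way
 * floyd_warshall_gpu could be implemented: copy the graph to the device and
 * launch one relaxation kernel per intermediate vertex k, each thread relaxing
 * a single (i, j) pair. The kernel and helper names below are illustrative and
 * are not part of the original skeleton.
 */
__global__ void fw_relax_kernel(int *dist, int graph_size, int k) {
  int i = blockIdx.y * blockDim.y + threadIdx.y;
  int j = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < graph_size && j < graph_size) {
    int alt = dist[i * graph_size + k] + dist[k * graph_size + j];
    if (alt < dist[i * graph_size + j]) { dist[i * graph_size + j] = alt; }
  }
}
void floyd_warshall_gpu_sketch(const int *graph, int graph_size, int *output) {
  int *dist_d;
  size_t bytes = sizeof(int) * graph_size * graph_size;
  cudaMalloc((void **)&dist_d, bytes);
  cudaMemcpy(dist_d, graph, bytes, cudaMemcpyHostToDevice);
  dim3 block(16, 16);
  dim3 grid((graph_size + block.x - 1) / block.x,
            (graph_size + block.y - 1) / block.y);
  for (int k = 0; k < graph_size; k++) {
    fw_relax_kernel<<<grid, block>>>(dist_d, graph_size, k);
  }
  cudaMemcpy(output, dist_d, bytes, cudaMemcpyDeviceToHost);
  cudaFree(dist_d);
}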
void floyd_warshall_cpu(const int *graph, int graph_size, int *output) {
int i, j, k;
memcpy(output, graph, sizeof(int) * graph_size * graph_size);
for (k = 0; k < graph_size; k++) {
for (i = 0; i < graph_size; i++) {
for (j = 0; j < graph_size; j++) {
if (D(i, k) + D(k, j) < D(i, j)) {
D(i, j) = D(i, k) + D(k, j);
}
}
}
}
}
int main(int argc, char **argv) {
#define TIMER_START() gettimeofday(&tv1, NULL)
#define TIMER_STOP() \
gettimeofday(&tv2, NULL); \
timersub(&tv2, &tv1, &tv); \
time_delta = (float)tv.tv_sec + tv.tv_usec / 1000000.0
struct timeval tv1, tv2, tv;
float time_delta;
int *graph, *output_cpu, *output_gpu;
int size;
size = sizeof(int) * GRAPH_SIZE * GRAPH_SIZE;
graph = (int *)malloc(size);
assert(graph);
output_cpu = (int *)malloc(size);
assert(output_cpu);
memset(output_cpu, 0, size);
output_gpu = (int *)malloc(size);
assert(output_gpu);
generate_random_graph(graph, GRAPH_SIZE);
fprintf(stderr, "running on cpu...\n");
TIMER_START();
floyd_warshall_cpu(graph, GRAPH_SIZE, output_cpu);
TIMER_STOP();
fprintf(stderr, "%f secs\n", time_delta);
fprintf(stderr, "running on gpu...\n");
TIMER_START();
floyd_warshall_gpu(graph, GRAPH_SIZE, output_gpu);
TIMER_STOP();
fprintf(stderr, "%f secs\n", time_delta);
if (memcmp(output_cpu, output_gpu, size) != 0) {
fprintf(stderr, "FAIL!\n");
}
return 0;
}
|
67439f810bcd0ff630783e60cd103a69d13ecaf3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
# include <bits/stdc++.h>
# include <hip/hip_runtime.h>
#define TILE_WIDTH 32 //(TITLE_WIDTH = BLOCKSIZE)
using namespace std;
// ::::::::::::::::::::::::::::::::::::::::::GPU::::::::::::::::::::::::::::::::
__global__ void KernelNormalMul(int *Mat1,int *Mat2,int *Mat3,int m,int n,int p){
int j = threadIdx.y + blockDim.y * blockIdx.y; // row
int i = threadIdx.x + blockDim.x * blockIdx.x; // col
if((j<m) && (i<p)){
int value=0;
for(int k=0;k<n;++k){
value+=Mat1[n*j+k]*Mat2[p*k+i];
}
Mat3[p*j+i]=value;
}
}
__global__ void KernelTilesMul(int *Mat1,int *Mat2,int *Mat3,int rowM1,int colM1,int colM2){
__shared__ int Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ int Nds[TILE_WIDTH][TILE_WIDTH];
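  // Each TILE_WIDTH x TILE_WIDTH thread block stages matching tiles of Mat1 and
  // Mat2 into shared memory (zero-padding out-of-range elements), accumulates
  // the partial dot product from on-chip memory, then slides to the next tile
  // along the shared dimension.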
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
int Pvalue = 0;
for(int k = 0; k < (colM1+TILE_WIDTH-1)/(TILE_WIDTH); ++k){
if(k*TILE_WIDTH + tx < colM1 && row < rowM1){
Mds[ty][tx] = Mat1[row*colM1 + k*TILE_WIDTH + tx];
}else{
Mds[ty][tx] = 0;
}
if(k*TILE_WIDTH + ty < colM1 && col < colM2){
Nds[ty][tx] = Mat2[(k*TILE_WIDTH + ty) * colM2 + col];
}else{
Nds[ty][tx] =0;
}
__syncthreads();
for(int k = 0; k < TILE_WIDTH; ++k){
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if (row < rowM1 && col < colM2){
Mat3[row*colM2+col] = Pvalue;
}
}
void d_MatrixMult(int *Mat1,int *Mat2,int *Mat3,int rowM1,int colM1,int colM2, int op ){
int * d_Mat1;
int * d_Mat2;
int * d_Mat3;
float Blocksize=TILE_WIDTH; // Bloque de 2 dimensiones 32*32=256 número de blokes= 1024 (1024/256=4)
int size1=rowM1*colM1;
int size2=colM1*colM2;
int size3=rowM1*colM2;
// 1. Separamos memoria en el device
hipMalloc(&d_Mat1,size1*sizeof(int));
hipMalloc(&d_Mat2,size2*sizeof(int));
hipMalloc(&d_Mat3,size3*sizeof(int));
// 2. Copiamos el valor de las variables de host a las variables del device.
hipMemcpy(d_Mat1, Mat1,size1*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(d_Mat2, Mat2,size2*sizeof(int),hipMemcpyHostToDevice);
// 3. Lógica de bloques e hilos, elementos para realizar la parelelización.
dim3 dimGrid(ceil(colM2/Blocksize),ceil(rowM1/Blocksize),1);
//dim3 dimGrid((m+Blocksize-1)/Blocksize,(p+Blocksize-1)/Blocksize,1);
dim3 dimBlock(Blocksize,Blocksize,1);
// 4. Invocación del kernel (invoción del host, ejecutadas en el device), <<<<#dimGrid,#dimBlock>>>
if(op==1){hipLaunchKernelGGL((KernelNormalMul), dim3(dimGrid),dim3(dimBlock), 0, 0, d_Mat1,d_Mat2,d_Mat3,rowM1,colM1,colM2);}else{
hipLaunchKernelGGL(( KernelTilesMul), dim3(dimGrid),dim3(dimBlock), 0, 0, d_Mat1,d_Mat2,d_Mat3,rowM1,colM1,colM2);
}
// 5. Copiamos el resultado para mostrar en el I/O del host.
hipMemcpy (Mat3,d_Mat3,size3*sizeof(int),hipMemcpyDeviceToHost);
// 6. Liberamos memoria.
hipFree(d_Mat1);
hipFree(d_Mat2);
hipFree(d_Mat3);
}
// :::::::::::::::::::::::::::::::::::::::Normal::::::::::::::::::::::::::::::::
void h_Mul_Mat(int *Mat1,int *Mat2, int *Mat3,int m,int n,int p){
for(int i=0;i<m;i++){
for(int j=0;j<p;j++){
int value=0;
for(int k=0;k<n;k++){
value+=Mat1[n*i+k]*Mat2[p*k+j];
}
Mat3[p*i+j]=value;
}
}
}
void llena_mat(int *Mat, int Value,int m,int n){// ver matriz como vector serial.
int size=n*m; // matriz lineal
for(int i =0 ; i<size ; i++){
Mat[i]=Value;
}
}
void mostrar_mat(int *Mat,int m,int n){//
int size=n*m; // matriz lineal
for (int i=0;i<size;i++) {
if(i%n==0 && n!=0){
cout<<endl;
}
cout<<"["<<Mat[i]<<"] ";
}
cout<<endl;
}
int check_mat(int *Mat1,int *Mat2,int m,int p){
for(int i=0; i<(m*p);++i){
if(Mat1[i]!=Mat2[i]){
cout<<"Error, Las matrices no son iguales"<<endl;
return 0;
}
}
cout<<"Las Matrices son iguales"<<endl;
return 0;
}
// :::::::::::::::::::::::::::::::::::Clock Function::::::::::::::::::::::::::::
double diffclock(clock_t clock1,clock_t clock2){
double diffticks=clock2-clock1;
double diffms=(diffticks)/(CLOCKS_PER_SEC/1); // /1000 mili
return diffms;
}
// :::::::::::::::::::::::::::::::::::::::Main::::::::::::::::::::::::::::::::.
int main(){
double T1,T2,T3; // variables de tiempo
int rowM1=2;
int colM1=4;
int colM2=4;
int *Mat1 = (int*)malloc((rowM1*colM1)*sizeof(int));
int *Mat2 = (int*)malloc((colM1*colM2)*sizeof(int));
int *Mat3 = (int*)malloc((rowM1*colM2)*sizeof(int));
int *Mat4 = (int*)malloc((rowM1*colM2)*sizeof(int));
int *Mat5 = (int*)malloc((rowM1*colM2)*sizeof(int));
llena_mat(Mat1,1,rowM1,colM1);
llena_mat(Mat2,1,colM1,colM2);
clock_t start = clock();
h_Mul_Mat(Mat1,Mat2,Mat3,rowM1,colM1,colM2);
clock_t end = clock();
T1=diffclock(start,end);
cout <<"Tiempo secuencial: "<<T1<<endl;
mostrar_mat(Mat3,rowM1,colM2);
clock_t start2 = clock();
d_MatrixMult(Mat1,Mat2,Mat4,rowM1,colM1,colM2,1); // paralelo
clock_t end2 = clock();
mostrar_mat(Mat4,rowM1,colM2);
T2=diffclock(start2,end2);
cout <<"Tiempo Paralelo: "<<T2<<endl;
cout<<"Aceleracin lograda: "<<T1/T2<<endl;
check_mat(Mat3,Mat4,rowM1,colM2);
clock_t start3 = clock();
d_MatrixMult(Mat1,Mat2,Mat5,rowM1,colM1,colM2,2); // tiles
mostrar_mat(Mat5,rowM1,colM2);
clock_t end3 = clock();
T3=diffclock(start3,end3);
cout <<"Tiempo Paralelo con Tiles: "<<T3<<endl;
cout<<"Aceleracin lograda Respecto a el tiempo paralelo: "<<T2/T3<<endl;
check_mat(Mat4,Mat5,rowM1,colM2);
free(Mat1);
free(Mat2);
free(Mat3);
free(Mat4);
free(Mat5);
return 0;
}
// http://www.techdarting.com/2014/03/matrix-multiplication-in-cuda-using.html
| 67439f810bcd0ff630783e60cd103a69d13ecaf3.cu | # include <bits/stdc++.h>
# include <cuda.h>
#define TILE_WIDTH 32 //(TITLE_WIDTH = BLOCKSIZE)
using namespace std;
// ::::::::::::::::::::::::::::::::::::::::::GPU::::::::::::::::::::::::::::::::
__global__ void KernelNormalMul(int *Mat1,int *Mat2,int *Mat3,int m,int n,int p){
int j = threadIdx.y + blockDim.y * blockIdx.y; // row
int i = threadIdx.x + blockDim.x * blockIdx.x; // col
if((j<m) && (i<p)){
int value=0;
for(int k=0;k<n;++k){
value+=Mat1[n*j+k]*Mat2[p*k+i];
}
Mat3[p*j+i]=value;
}
}
__global__ void KernelTilesMul(int *Mat1,int *Mat2,int *Mat3,int rowM1,int colM1,int colM2){
__shared__ int Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ int Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
int Pvalue = 0;
for(int k = 0; k < (colM1+TILE_WIDTH-1)/(TILE_WIDTH); ++k){
if(k*TILE_WIDTH + tx < colM1 && row < rowM1){
Mds[ty][tx] = Mat1[row*colM1 + k*TILE_WIDTH + tx];
}else{
Mds[ty][tx] = 0;
}
if(k*TILE_WIDTH + ty < colM1 && col < colM2){
Nds[ty][tx] = Mat2[(k*TILE_WIDTH + ty) * colM2 + col];
}else{
Nds[ty][tx] =0;
}
__syncthreads();
for(int k = 0; k < TILE_WIDTH; ++k){
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if (row < rowM1 && col < colM2){
Mat3[row*colM2+col] = Pvalue;
}
}
void d_MatrixMult(int *Mat1,int *Mat2,int *Mat3,int rowM1,int colM1,int colM2, int op ){
int * d_Mat1;
int * d_Mat2;
int * d_Mat3;
float Blocksize=TILE_WIDTH; // Bloque de 2 dimensiones 32*32=256 número de blokes= 1024 (1024/256=4)
int size1=rowM1*colM1;
int size2=colM1*colM2;
int size3=rowM1*colM2;
// 1. Separamos memoria en el device
cudaMalloc(&d_Mat1,size1*sizeof(int));
cudaMalloc(&d_Mat2,size2*sizeof(int));
cudaMalloc(&d_Mat3,size3*sizeof(int));
// 2. Copiamos el valor de las variables de host a las variables del device.
cudaMemcpy(d_Mat1, Mat1,size1*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_Mat2, Mat2,size2*sizeof(int),cudaMemcpyHostToDevice);
// 3. Lógica de bloques e hilos, elementos para realizar la parelelización.
dim3 dimGrid(ceil(colM2/Blocksize),ceil(rowM1/Blocksize),1);
//dim3 dimGrid((m+Blocksize-1)/Blocksize,(p+Blocksize-1)/Blocksize,1);
dim3 dimBlock(Blocksize,Blocksize,1);
// 4. Invocación del kernel (invoción del host, ejecutadas en el device), <<<<#dimGrid,#dimBlock>>>
if(op==1){KernelNormalMul<<<dimGrid,dimBlock>>>(d_Mat1,d_Mat2,d_Mat3,rowM1,colM1,colM2);}else{
KernelTilesMul<<<dimGrid,dimBlock>>>(d_Mat1,d_Mat2,d_Mat3,rowM1,colM1,colM2);
}
// 5. Copiamos el resultado para mostrar en el I/O del host.
cudaMemcpy (Mat3,d_Mat3,size3*sizeof(int),cudaMemcpyDeviceToHost);
// 6. Liberamos memoria.
cudaFree(d_Mat1);
cudaFree(d_Mat2);
cudaFree(d_Mat3);
}
// :::::::::::::::::::::::::::::::::::::::Normal::::::::::::::::::::::::::::::::
void h_Mul_Mat(int *Mat1,int *Mat2, int *Mat3,int m,int n,int p){
for(int i=0;i<m;i++){
for(int j=0;j<p;j++){
int value=0;
for(int k=0;k<n;k++){
value+=Mat1[n*i+k]*Mat2[p*k+j];
}
Mat3[p*i+j]=value;
}
}
}
void llena_mat(int *Mat, int Value,int m,int n){// ver matriz como vector serial.
int size=n*m; // matriz lineal
for(int i =0 ; i<size ; i++){
Mat[i]=Value;
}
}
void mostrar_mat(int *Mat,int m,int n){//
int size=n*m; // matriz lineal
for (int i=0;i<size;i++) {
if(i%n==0 && n!=0){
cout<<endl;
}
cout<<"["<<Mat[i]<<"] ";
}
cout<<endl;
}
int check_mat(int *Mat1,int *Mat2,int m,int p){
for(int i=0; i<(m*p);++i){
if(Mat1[i]!=Mat2[i]){
cout<<"Error, Las matrices no son iguales"<<endl;
return 0;
}
}
cout<<"Las Matrices son iguales"<<endl;
return 0;
}
// :::::::::::::::::::::::::::::::::::Clock Function::::::::::::::::::::::::::::
double diffclock(clock_t clock1,clock_t clock2){
double diffticks=clock2-clock1;
double diffms=(diffticks)/(CLOCKS_PER_SEC/1); // /1000 mili
return diffms;
}
// :::::::::::::::::::::::::::::::::::::::Main::::::::::::::::::::::::::::::::.
int main(){
double T1,T2,T3; // variables de tiempo
int rowM1=2;
int colM1=4;
int colM2=4;
int *Mat1 = (int*)malloc((rowM1*colM1)*sizeof(int));
int *Mat2 = (int*)malloc((colM1*colM2)*sizeof(int));
int *Mat3 = (int*)malloc((rowM1*colM2)*sizeof(int));
int *Mat4 = (int*)malloc((rowM1*colM2)*sizeof(int));
int *Mat5 = (int*)malloc((rowM1*colM2)*sizeof(int));
llena_mat(Mat1,1,rowM1,colM1);
llena_mat(Mat2,1,colM1,colM2);
clock_t start = clock();
h_Mul_Mat(Mat1,Mat2,Mat3,rowM1,colM1,colM2);
clock_t end = clock();
T1=diffclock(start,end);
cout <<"Tiempo secuencial: "<<T1<<endl;
mostrar_mat(Mat3,rowM1,colM2);
clock_t start2 = clock();
d_MatrixMult(Mat1,Mat2,Mat4,rowM1,colM1,colM2,1); // paralelo
clock_t end2 = clock();
mostrar_mat(Mat4,rowM1,colM2);
T2=diffclock(start2,end2);
cout <<"Tiempo Paralelo: "<<T2<<endl;
cout<<"Aceleración lograda: "<<T1/T2<<endl;
check_mat(Mat3,Mat4,rowM1,colM2);
clock_t start3 = clock();
d_MatrixMult(Mat1,Mat2,Mat5,rowM1,colM1,colM2,2); // tiles
mostrar_mat(Mat5,rowM1,colM2);
clock_t end3 = clock();
T3=diffclock(start3,end3);
cout <<"Tiempo Paralelo con Tiles: "<<T3<<endl;
cout<<"Aceleración lograda Respecto a el tiempo paralelo: "<<T2/T3<<endl;
check_mat(Mat4,Mat5,rowM1,colM2);
free(Mat1);
free(Mat2);
free(Mat3);
free(Mat4);
free(Mat5);
return 0;
}
// http://www.techdarting.com/2014/03/matrix-multiplication-in-cuda-using.html
|
1199352304f11ff8d6daf44ed532ed52e587155c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void helloFromGPU(void)
{
printf("Hello Cuda 8.0!(From GPU thread no: %d)\n",threadIdx.x);
}
int main()
{
printf("Hello Cuda 8.0!(From CPU)\n");
hipLaunchKernelGGL(( helloFromGPU), dim3(1), dim3(10), 0, 0, );
hipDeviceReset();
return 0;
}
| 1199352304f11ff8d6daf44ed532ed52e587155c.cu | #include <stdio.h>
__global__ void helloFromGPU(void)
{
printf("Hello Cuda 8.0!(From GPU thread no: %d)\n",threadIdx.x);
}
int main()
{
printf("Hello Cuda 8.0!(From CPU)\n");
helloFromGPU<<<1, 10>>>();
cudaDeviceReset();
return 0;
}
|
1ac36f864e967044f666d52db447d7de1052bf39.hip | // !!! This is a file automatically generated by hipify!!!
//tutorial: https://medium.com/@akshathvarugeese/cuda-c-functions-in-python-through-dll-and-ctypes-for-windows-os-c29f56361089
//how to make cross-platform code for shared library https://stackoverflow.com/questions/2164827/explicitly-exporting-shared-library-functions-in-linux
// how to warp C++ into C for ctypes: https://www.auctoris.co.uk/2017/04/29/calling-c-classes-from-python-with-ctypes/
#if defined(_MSC_VER)
// Microsoft
#define EXPORT extern "C" __declspec(dllexport)
#define IMPORT extern "C" __declspec(dllimport)
#elif defined(__GNUC__)
// GCC
#define EXPORT __attribute__((visibility("default")))
#define IMPORT
#else
// do nothing and hope for the best?
#define EXPORT
#define IMPORT
#pragma warning Unknown dynamic link import/export semantics.
#endif
#include <iostream>
#include <hip/hip_runtime.h>
#include <cmath>
#include <helper_cuda.h>
# define M_PI 3.14159265358979323846 /* pi */
const int N_CANNELS = 3;
const unsigned int core_gpu_max_size = 64 * 64;
__constant__ unsigned short core_gpu[core_gpu_max_size];
__global__ void blurKernel(unsigned char* image_gpu, int n_channels, int image_size_x, int image_strade, int kernel_size, unsigned char* image_out, int image_out_size_x, int image_out_size_y, int image_out_strade)
{
const unsigned int index_output_y = blockIdx.y * blockDim.y + threadIdx.y;
if (index_output_y < image_out_size_y)
{
const unsigned int index_output_x = blockIdx.x * blockDim.x + threadIdx.x;
if (index_output_x < image_out_size_x)
{
const unsigned int index_output_channel = blockIdx.z * blockDim.z + threadIdx.z;
int index_input_offset = image_strade * index_output_channel + index_output_y * image_size_x + index_output_x;
unsigned short sum = 0;
int core_index = 0;
for (int y = 0; y < kernel_size; y++)
{
int index_input_offset_2 = index_input_offset + y * image_size_x;
for (int x = 0; x < kernel_size; x++)
{
int index_input = index_input_offset_2 + x;
sum += ((unsigned short)image_gpu[index_input]) * core_gpu[core_index];
core_index++;
}
}
int index_output = image_out_strade * index_output_channel + index_output_y * image_out_size_x + index_output_x;
sum += 128; // for rounding
image_out[index_output] = (unsigned char)(sum / 256);
}
}
}
int integer_division_ceiling(int x, int y)
{
// https://stackoverflow.com/questions/2745074/fast-ceiling-of-an-integer-division-in-c-c
return x / y + (x % y != 0);
}
class Blurer
{
public:
Blurer(int image_size_x_inp, int image_size_y_inp, int kernel_size_inp, float sigma)
{
image_size_x = image_size_x_inp;
image_size_y = image_size_y_inp;
kernel_size = kernel_size_inp;
// create core:
float mean = (kernel_size - 1.0) / 2.0;
float variance = sigma * sigma;
float variance2 = 2 * variance;
float variance2pi = variance2 * M_PI;
int kernel_size2 = kernel_size * kernel_size;
float* core_float_cpu = (float*)malloc(sizeof(float) * kernel_size2);
int index = 0;
float core_float_cpu_sum = 0.0;
for (int y = 0; y < kernel_size; y++)
{
float dy = (float)y - mean;
float dy2 = dy * dy;
for (int x = 0; x < kernel_size; x++)
{
float dx = (float)x - mean;
float dx2 = dx * dx;
float argument_tmp = (dx2 + dy2) / variance2;
core_float_cpu[index] = exp(-argument_tmp) / (variance2pi);
core_float_cpu_sum += core_float_cpu[index];
index++;
}
}
for (index = 0; index < kernel_size2; index++)
{
core_float_cpu[index] = core_float_cpu[index] / core_float_cpu_sum;
}
unsigned short* core_cpu;
core_cpu = (unsigned short*)malloc(sizeof(unsigned short) * kernel_size2);
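        // Quantize the normalized Gaussian weights to 8.8 fixed point (scale by
        // 256); blurKernel adds 128 to the accumulated sum before dividing by 256
        // so the 8-bit result rounds to nearest instead of truncating.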
for (index = 0; index < kernel_size2; index++)
{
core_cpu[index] = (unsigned short)(roundf(256.0 * core_float_cpu[index]));
}
checkCudaErrors(hipMemcpyToSymbol(core_gpu, core_cpu, sizeof(unsigned short) * kernel_size2));
image_size = image_size_x * image_size_y * N_CANNELS;
if (hipMalloc((void**)&image_gpu, sizeof(unsigned char) * image_size) != hipSuccess)
{
std::cout << "Error allocating GPU image_gpu\n";
}
image_out_size_x = image_size_x - kernel_size + 1;
image_out_size_y = image_size_y - kernel_size + 1;
image_out_size = image_out_size_x * image_out_size_y * N_CANNELS;
if (hipMalloc((void**)&image_out_gpu, sizeof(unsigned char) * image_out_size) != hipSuccess)
{
std::cout << "Error allocating GPU image_out_gpu\n";
}
int image_out_thread_size_x = 16;
int image_out_thread_size_y = 16;
int image_out_thread_size_z = 1;
int image_out_grid_size_x = integer_division_ceiling(image_out_size_x, image_out_thread_size_x);
int image_out_grid_size_y = integer_division_ceiling(image_out_size_y, image_out_thread_size_y);
grid_image_out = dim3(image_out_grid_size_x, image_out_grid_size_y, image_out_thread_size_z);
threadBlock_image_out = dim3(image_out_thread_size_x, image_out_thread_size_y, N_CANNELS);
image_strade = image_size_x * image_size_y;
image_out_strade = image_out_size_x * image_out_size_y;
}
void blur(unsigned char* image, unsigned char* image_out)
{
hipMemcpy(image_gpu, image, sizeof(unsigned char) * image_size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( blurKernel), dim3(grid_image_out), dim3(threadBlock_image_out), 0, 0, image_gpu, N_CANNELS, image_size_x, image_strade, kernel_size, image_out_gpu, image_out_size_x, image_out_size_y, image_out_strade);
hipMemcpy(image_out, image_out_gpu, sizeof(char) * image_out_size, hipMemcpyDeviceToHost);
}
private:
int image_size;
int image_out_size;
dim3 grid_image_out;
dim3 threadBlock_image_out;
unsigned char* image_gpu;
int image_size_x;
int image_size_y;
int image_strade;
int kernel_size;
unsigned char* image_out_gpu;
int image_out_size_x;
int image_out_size_y;
int image_out_strade;
};
EXPORT Blurer* Blurer_new(int image_size_x_inp, int image_size_y_inp, int kernel_size_inp, float sigma)
{
return new Blurer(image_size_x_inp, image_size_y_inp, kernel_size_inp, sigma);
}
EXPORT void Blurer_blur(Blurer* blurer, unsigned char* image, unsigned char* image_out)
{
blurer->blur(image, image_out);
}
| 1ac36f864e967044f666d52db447d7de1052bf39.cu | //tutorial: https://medium.com/@akshathvarugeese/cuda-c-functions-in-python-through-dll-and-ctypes-for-windows-os-c29f56361089
//how to make cross-platform code for shared library https://stackoverflow.com/questions/2164827/explicitly-exporting-shared-library-functions-in-linux
// how to warp C++ into C for ctypes: https://www.auctoris.co.uk/2017/04/29/calling-c-classes-from-python-with-ctypes/
#if defined(_MSC_VER)
// Microsoft
#define EXPORT extern "C" __declspec(dllexport)
#define IMPORT extern "C" __declspec(dllimport)
#elif defined(__GNUC__)
// GCC
#define EXPORT __attribute__((visibility("default")))
#define IMPORT
#else
// do nothing and hope for the best?
#define EXPORT
#define IMPORT
#pragma warning Unknown dynamic link import/export semantics.
#endif
#include <iostream>
#include <cuda.h>
#include <cmath>
#include <helper_cuda.h>
# define M_PI 3.14159265358979323846 /* pi */
const int N_CANNELS = 3;
const unsigned int core_gpu_max_size = 64 * 64;
__constant__ unsigned short core_gpu[core_gpu_max_size];
__global__ void blurKernel(unsigned char* image_gpu, int n_channels, int image_size_x, int image_strade, int kernel_size, unsigned char* image_out, int image_out_size_x, int image_out_size_y, int image_out_strade)
{
const unsigned int index_output_y = blockIdx.y * blockDim.y + threadIdx.y;
if (index_output_y < image_out_size_y)
{
const unsigned int index_output_x = blockIdx.x * blockDim.x + threadIdx.x;
if (index_output_x < image_out_size_x)
{
const unsigned int index_output_channel = blockIdx.z * blockDim.z + threadIdx.z;
int index_input_offset = image_strade * index_output_channel + index_output_y * image_size_x + index_output_x;
unsigned short sum = 0;
int core_index = 0;
for (int y = 0; y < kernel_size; y++)
{
int index_input_offset_2 = index_input_offset + y * image_size_x;
for (int x = 0; x < kernel_size; x++)
{
int index_input = index_input_offset_2 + x;
sum += ((unsigned short)image_gpu[index_input]) * core_gpu[core_index];
core_index++;
}
}
int index_output = image_out_strade * index_output_channel + index_output_y * image_out_size_x + index_output_x;
sum += 128; // for rounding
image_out[index_output] = (unsigned char)(sum / 256);
}
}
}
int integer_division_ceiling(int x, int y)
{
// https://stackoverflow.com/questions/2745074/fast-ceiling-of-an-integer-division-in-c-c
return x / y + (x % y != 0);
}
class Blurer
{
public:
Blurer(int image_size_x_inp, int image_size_y_inp, int kernel_size_inp, float sigma)
{
image_size_x = image_size_x_inp;
image_size_y = image_size_y_inp;
kernel_size = kernel_size_inp;
// create core:
float mean = (kernel_size - 1.0) / 2.0;
float variance = sigma * sigma;
float variance2 = 2 * variance;
float variance2pi = variance2 * M_PI;
int kernel_size2 = kernel_size * kernel_size;
float* core_float_cpu = (float*)malloc(sizeof(float) * kernel_size2);
int index = 0;
float core_float_cpu_sum = 0.0;
for (int y = 0; y < kernel_size; y++)
{
float dy = (float)y - mean;
float dy2 = dy * dy;
for (int x = 0; x < kernel_size; x++)
{
float dx = (float)x - mean;
float dx2 = dx * dx;
float argument_tmp = (dx2 + dy2) / variance2;
core_float_cpu[index] = exp(-argument_tmp) / (variance2pi);
core_float_cpu_sum += core_float_cpu[index];
index++;
}
}
for (index = 0; index < kernel_size2; index++)
{
core_float_cpu[index] = core_float_cpu[index] / core_float_cpu_sum;
}
unsigned short* core_cpu;
core_cpu = (unsigned short*)malloc(sizeof(unsigned short) * kernel_size2);
for (index = 0; index < kernel_size2; index++)
{
core_cpu[index] = (unsigned short)(roundf(256.0 * core_float_cpu[index]));
}
checkCudaErrors(cudaMemcpyToSymbol(core_gpu, core_cpu, sizeof(unsigned short) * kernel_size2));
image_size = image_size_x * image_size_y * N_CANNELS;
if (cudaMalloc((void**)&image_gpu, sizeof(unsigned char) * image_size) != cudaSuccess)
{
std::cout << "Error allocating GPU image_gpu\n";
}
image_out_size_x = image_size_x - kernel_size + 1;
image_out_size_y = image_size_y - kernel_size + 1;
image_out_size = image_out_size_x * image_out_size_y * N_CANNELS;
if (cudaMalloc((void**)&image_out_gpu, sizeof(unsigned char) * image_out_size) != cudaSuccess)
{
std::cout << "Error allocating GPU image_out_gpu\n";
}
int image_out_thread_size_x = 16;
int image_out_thread_size_y = 16;
int image_out_thread_size_z = 1;
int image_out_grid_size_x = integer_division_ceiling(image_out_size_x, image_out_thread_size_x);
int image_out_grid_size_y = integer_division_ceiling(image_out_size_y, image_out_thread_size_y);
grid_image_out = dim3(image_out_grid_size_x, image_out_grid_size_y, image_out_thread_size_z);
threadBlock_image_out = dim3(image_out_thread_size_x, image_out_thread_size_y, N_CANNELS);
image_strade = image_size_x * image_size_y;
image_out_strade = image_out_size_x * image_out_size_y;
}
void blur(unsigned char* image, unsigned char* image_out)
{
cudaMemcpy(image_gpu, image, sizeof(unsigned char) * image_size, cudaMemcpyHostToDevice);
blurKernel<<<grid_image_out, threadBlock_image_out>>> (image_gpu, N_CANNELS, image_size_x, image_strade, kernel_size, image_out_gpu, image_out_size_x, image_out_size_y, image_out_strade);
cudaMemcpy(image_out, image_out_gpu, sizeof(char) * image_out_size, cudaMemcpyDeviceToHost);
}
private:
int image_size;
int image_out_size;
dim3 grid_image_out;
dim3 threadBlock_image_out;
unsigned char* image_gpu;
int image_size_x;
int image_size_y;
int image_strade;
int kernel_size;
unsigned char* image_out_gpu;
int image_out_size_x;
int image_out_size_y;
int image_out_strade;
};
EXPORT Blurer* Blurer_new(int image_size_x_inp, int image_size_y_inp, int kernel_size_inp, float sigma)
{
return new Blurer(image_size_x_inp, image_size_y_inp, kernel_size_inp, sigma);
}
EXPORT void Blurer_blur(Blurer* blurer, unsigned char* image, unsigned char* image_out)
{
blurer->blur(image, image_out);
}
|
452c17312418978ad3453f7fb05fdba6fafb39df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
#include <math.h>
#include <hip/hip_complex.h>
__global__ void exp_double(int n,int idx,double *dy,int incy,double *result) {
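    // Grid-stride loop: for every element whose index is >= idx and a multiple
    // of incy (a BLAS-style increment), write exp(dy[i]) into result[i].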
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = exp(dy[i]);
}
} | 452c17312418978ad3453f7fb05fdba6fafb39df.cu | extern "C"
#include <math.h>
#include <cuComplex.h>
__global__ void exp_double(int n,int idx,double *dy,int incy,double *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = exp(dy[i]);
}
} |
85715da6a0026ae7592a12d54eba09d2ed9d4bd1.hip | // !!! This is a file automatically generated by hipify!!!
///////////////////////////////////////
///////////////////////////////// SSSP6
/////////////////////// usando texturas
///////////////////////////////////////
/* CWJ includes */
#include <hip/hip_runtime.h>
#include "comun.cu"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#ifndef _SSSP6_Texture_All
#define _SSSP6_Texture_All
//////////////////////////////////////////
bool ejecutarIteracion_SSSP6_tex_all(
const unsigned int nVuelta,
const dim3 grid, const dim3 threads,
const unsigned int nv, const unsigned int na,
const unsigned int mem_size_V, const unsigned int mem_size_A,
const unsigned int mem_size_C, const unsigned int mem_size_F,
const unsigned int infinito,
bool* p_h, bool* f_h, unsigned int* c_h ,
bool* p_d, bool* f_d, unsigned int* c_d,
unsigned int* chi, unsigned int* cho, unsigned int* cdi, unsigned int* cdo)
{
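	// One iteration of the frontier-based single-source shortest-path sweep:
	// kernel1 updates the tentative costs (caminos mínimos especiales) from the
	// current frontier, minimizar() reduces the costs of the still-pending
	// vertices to their minimum, and kernel3 updates the pending/frontier flags
	// from that minimum. Returns true (last iteration) once the minimum equals
	// infinito, i.e. no reachable pending vertex remains.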
//RECUERDA: mem_size_V= (nv+1)*sizeof(unsigned int)
#ifdef DEBUG
printf("\n\n*******************\n");
printf("\nVUELTA %i\n",nVuelta);
mostrarUI(c_h, nv, "c_h");
mostrarB(f_h, nv, "f_h");
mostrarB(p_h, nv, "p_h");
printf("\nEJECUCION KERNEL 1\n");
printf("num_threadsInBlock= %i\n", threads.x);
printf("num_blocksInGrid= %i\n", grid.x);
#endif // DEBUG
/* Updated timer code for CUDA 9 */
hipEvent_t timerStart, timerStop;
float time;
// start things
hipEventCreate(&timerStart);
hipEventCreate(&timerStop);
hipEventRecord(timerStart, 0);
//ejecutar último kernel
hipGetLastError(); // reset the runtime error variable to hipSuccess
// ACTUALIZANDO CAMINOS MINIMOS ESPECIALES: kernel1
hipLaunchKernelGGL(( kernel1_SSSP6_tex), dim3(grid),dim3(threads),threads.x*sizeof(unsigned int), 0, p_d, f_d, c_d);
// check if kernel execution generated and error
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipPeekAtLastError());
hipDeviceSynchronize();
// end things
hipEventRecord(timerStop, 0);
hipEventSynchronize(timerStop);
hipEventElapsedTime(&time, timerStart, timerStop);
hipEventDestroy(timerStart);
hipEventDestroy(timerStop);
//printf("%.6f", time);
printf("K1 = %f\n", time);
#ifdef DEBUG
copiarD2H((void*)c_h, (void*)c_d, mem_size_C);
mostrarUI(c_h, nv, "c_h");
printf("\nEJECUCION KERNEL 2\n");
#endif // DEBUG
//MINIMIZANDO LOS COSTES RECIEN ACTUALIZADOS
unsigned int min= infinito;
minimizar(nv, c_d, p_d, threads, infinito, chi, cho, cdi, cdo, min);
#ifdef DEBUG
printf("\n\nEl minimo es %i\n", min);
printf("\nEJECUCION KERNEL 3\n");
printf("num_threadsInBlock= %i\n", threads.x);
printf("num_blocksInGrid= %i\n", grid.x);
#endif // DEBUG
// start things
hipEventCreate(&timerStart);
hipEventCreate(&timerStop);
hipEventRecord(timerStart, 0);
//ejecutar ltimo kernel
hipGetLastError(); // reset the runtime error variable to hipSuccess
//ACTUALIZANDO LA FRONTERA: Kernel3
hipLaunchKernelGGL(( kernel3_tex), dim3(grid),dim3(threads), 0, 0, p_d, f_d, min);
// check if kernel execution generated and error
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipPeekAtLastError());
hipDeviceSynchronize();
// end things
hipEventRecord(timerStop, 0);
hipEventSynchronize(timerStop);
hipEventElapsedTime(&time, timerStart, timerStop);
hipEventDestroy(timerStart);
hipEventDestroy(timerStop);
//printf("%.6f", time);
printf("K3 = %f\n", time);
#ifdef DEBUG
copiarD2H( (void*) p_h, (void*)p_d, mem_size_F);
mostrarB(p_h, nv, "p_h");
copiarD2H( (void*) f_h, (void*)f_d, mem_size_F);
mostrarB(f_h, nv, "f_h");
#endif // DEBUG
return (min==infinito);
}
//////////////////////////////////
void testGraph_SSSP6_tex_all(
const unsigned int nv, const unsigned int mem_size_V,
const unsigned int na, const unsigned int mem_size_A,
const unsigned int infinito,
const unsigned int* v_h, const unsigned int* a_h, const unsigned int* w_h,
const unsigned int* reference)
{
//RECUERDA: mem_size_V= (nv+1)*sizeof(unsigned int)
unsigned int* v_d; //array de vértices device
unsigned int* a_d; //array de aristas device
unsigned int* w_d; //array de pesos device
//copiar grafo de host a device
inicializar_Grafo_Device(v_h, mem_size_V, v_d,
a_h, mem_size_A, a_d,
w_h, w_d);
//enlazar las texturas
hipBindTexture(0, textura_v, v_d, mem_size_V);
hipBindTexture(0, textura_a, a_d, mem_size_A);
hipBindTexture(0, textura_w, w_d, mem_size_A);
unsigned int* c_h; //solución en el host
unsigned int* c_d; //solución en el device
unsigned int mem_size_C= mem_size_V-sizeof(unsigned int); //Descontar el tapon -4
inicializar_Sol(c_h, c_d, nv, mem_size_C, infinito);
bool* f_h; //frontera en el host
bool* f_d; //frontera en el device
unsigned int mem_size_F= sizeof(bool) * nv;
inicializar_Frontera(f_h, f_d, nv, mem_size_F);
bool* p_h; //pendientes por procesar
bool* p_d; //pendientes por procesar
inicializar_Pendientes(p_h, p_d, nv, mem_size_F);
//enlazar las texturas del algoritmo
hipBindTexture(0, textura_c, c_d, mem_size_C);
//hipBindTexture(0, textura_p, p_d, mem_size_F);
//hipBindTexture(0, textura_f, f_d, mem_size_F);
#ifdef DEBUG
//DEPURACION
printf("\nnv= %i\n", nv);
printf("na= %i\n", na);
printf("mem_size_V= %i\n", mem_size_V);
printf("mem_size_A= %i\n", mem_size_A);
printf("mem_size_F= %i\n\n", mem_size_F);
#endif // DEBUG
// setup execution parameters
unsigned int num_threadsInBlock= NUM_THREADS_IN_BLOCK;
//unsigned int num_blocksInGrid= nv/num_threadsInBlock;
unsigned int num_blocksInGrid = (nv + (num_threadsInBlock-1)) / num_threadsInBlock;
dim3 grid( num_blocksInGrid, 1, 1);
dim3 threads( num_threadsInBlock, 1, 1);
//RESERVAR ESPACIO PARA LA MINIMIZACION
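//Scratch buffers for the two-pass minimum reduction: the sizes suggest each
//reduction block of num_threadsInBlock threads collapses 2*num_threadsInBlock
//cost entries, so nvi and nvo are the element counts left after the first and
//second passes (chi/cho on the host, cdi/cdo on the device).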
unsigned int nvi= nv/(2*num_threadsInBlock);
unsigned int nvo= nvi/(2*num_threadsInBlock);
unsigned int* cdi;
unsigned int* cdo;
hipMalloc((void**) &cdi, nvi*sizeof(unsigned int));
hipMalloc((void**) &cdo, nvo*sizeof(unsigned int));
unsigned int* chi = (unsigned int*) malloc(nvi*sizeof(unsigned int));
unsigned int* cho = (unsigned int*) malloc(nvo*sizeof(unsigned int));
/* Updated timer code for CUDA 9 */
hipEvent_t timerStart, timerStop;
float time;
// start things
hipEventCreate(&timerStart);
hipEventCreate(&timerStop);
hipEventRecord(timerStart, 0);
//EJECUTAR VUELTAS
bool ultima= false;
unsigned int i= 0;
while(!ultima){
i++;
ultima= ejecutarIteracion_SSSP6_tex_all( i,
grid, threads,
nv, na,
mem_size_V, mem_size_A, mem_size_C, mem_size_F,
infinito,
p_h, f_h, c_h,
p_d, f_d, c_d,
chi, cho, cdi, cdo);
}//while
// end things
hipEventRecord(timerStop, 0);
hipEventSynchronize(timerStop);
hipEventElapsedTime(&time, timerStart, timerStop);
hipEventDestroy(timerStart);
hipEventDestroy(timerStop);
printf("%.6f", time);
copiarD2H((void*)c_h, (void*)c_d, mem_size_C);
//desenlazar las texturas
hipUnbindTexture(textura_v);
hipUnbindTexture(textura_a);
hipUnbindTexture(textura_w);
// cleanup memory
hipFree(v_d);
hipFree(a_d);
hipFree(w_d);
free( f_h);
free( p_h);
//desenlazar las texturas
hipUnbindTexture(textura_c);
//hipUnbindTexture(textura_p);
//hipUnbindTexture(textura_f);
hipFree(c_d);
hipFree(f_d);
hipFree(p_d);
free(chi);
free(cho);
hipFree(cdi);
hipFree(cdo);
// check result
//CUTBoolean res = cutComparei( (int*)reference, (int*)c_h, nv);
//printf( "%s\t", (1 == res) ? "OK" : "FAILED");
//mostrarUI(c_h, nv, "c_h");
//mostrarUI(reference, nv, "reference");
// cleanup memory
free(c_h);
}
#endif //#ifndef _SSSP6_Texture_All
| 85715da6a0026ae7592a12d54eba09d2ed9d4bd1.cu | ///////////////////////////////////////
///////////////////////////////// SSSP6
/////////////////////// usando texturas
///////////////////////////////////////
/* CWJ includes */
#include <cuda.h>
#include "comun.cu"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#ifndef _SSSP6_Texture_All
#define _SSSP6_Texture_All
//////////////////////////////////////////
bool ejecutarIteracion_SSSP6_tex_all(
const unsigned int nVuelta,
const dim3 grid, const dim3 threads,
const unsigned int nv, const unsigned int na,
const unsigned int mem_size_V, const unsigned int mem_size_A,
const unsigned int mem_size_C, const unsigned int mem_size_F,
const unsigned int infinito,
bool* p_h, bool* f_h, unsigned int* c_h ,
bool* p_d, bool* f_d, unsigned int* c_d,
unsigned int* chi, unsigned int* cho, unsigned int* cdi, unsigned int* cdo)
{
//RECUERDA: mem_size_V= (nv+1)*sizeof(unsigned int)
#ifdef DEBUG
printf("\n\n*******************\n");
printf("\nVUELTA %i\n",nVuelta);
mostrarUI(c_h, nv, "c_h");
mostrarB(f_h, nv, "f_h");
mostrarB(p_h, nv, "p_h");
printf("\nEJECUCION KERNEL 1\n");
printf("num_threadsInBlock= %i\n", threads.x);
printf("num_blocksInGrid= %i\n", grid.x);
#endif // DEBUG
/* Updated timer code for CUDA 9 */
cudaEvent_t timerStart, timerStop;
float time;
// start things
cudaEventCreate(&timerStart);
cudaEventCreate(&timerStop);
cudaEventRecord(timerStart, 0);
//ejecutar último kernel
cudaGetLastError(); // reset the runtime error variable to cudaSuccess
// ACTUALIZANDO CAMINOS MINIMOS ESPECIALES: kernel1
kernel1_SSSP6_tex<<<grid,threads,threads.x*sizeof(unsigned int)>>>( p_d, f_d, c_d);
// check if kernel execution generated and error
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaPeekAtLastError());
cudaThreadSynchronize();
// end things
cudaEventRecord(timerStop, 0);
cudaEventSynchronize(timerStop);
cudaEventElapsedTime(&time, timerStart, timerStop);
cudaEventDestroy(timerStart);
cudaEventDestroy(timerStop);
//printf("%.6f", time);
printf("K1 = %f\n", time);
#ifdef DEBUG
copiarD2H((void*)c_h, (void*)c_d, mem_size_C);
mostrarUI(c_h, nv, "c_h");
printf("\nEJECUCION KERNEL 2\n");
#endif // DEBUG
//MINIMIZANDO LOS COSTES RECIEN ACTUALIZADOS
unsigned int min= infinito;
minimizar(nv, c_d, p_d, threads, infinito, chi, cho, cdi, cdo, min);
#ifdef DEBUG
printf("\n\nEl minimo es %i\n", min);
printf("\nEJECUCION KERNEL 3\n");
printf("num_threadsInBlock= %i\n", threads.x);
printf("num_blocksInGrid= %i\n", grid.x);
#endif // DEBUG
// start things
cudaEventCreate(&timerStart);
cudaEventCreate(&timerStop);
cudaEventRecord(timerStart, 0);
//ejecutar último kernel
cudaGetLastError(); // reset the runtime error variable to cudaSuccess
//ACTUALIZANDO LA FRONTERA: Kernel3
kernel3_tex<<<grid,threads>>>( p_d, f_d, min);
// check if kernel execution generated and error
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaPeekAtLastError());
cudaThreadSynchronize();
// end things
cudaEventRecord(timerStop, 0);
cudaEventSynchronize(timerStop);
cudaEventElapsedTime(&time, timerStart, timerStop);
cudaEventDestroy(timerStart);
cudaEventDestroy(timerStop);
//printf("%.6f", time);
printf("K3 = %f\n", time);
#ifdef DEBUG
copiarD2H( (void*) p_h, (void*)p_d, mem_size_F);
mostrarB(p_h, nv, "p_h");
copiarD2H( (void*) f_h, (void*)f_d, mem_size_F);
mostrarB(f_h, nv, "f_h");
#endif // DEBUG
return (min==infinito);
}
//////////////////////////////////
void testGraph_SSSP6_tex_all(
const unsigned int nv, const unsigned int mem_size_V,
const unsigned int na, const unsigned int mem_size_A,
const unsigned int infinito,
const unsigned int* v_h, const unsigned int* a_h, const unsigned int* w_h,
const unsigned int* reference)
{
//RECUERDA: mem_size_V= (nv+1)*sizeof(unsigned int)
unsigned int* v_d; //array de vértices device
unsigned int* a_d; //array de aristas device
unsigned int* w_d; //array de pesos device
//copiar grafo de host a device
inicializar_Grafo_Device(v_h, mem_size_V, v_d,
a_h, mem_size_A, a_d,
w_h, w_d);
//enlazar las texturas
cudaBindTexture(0, textura_v, v_d, mem_size_V);
cudaBindTexture(0, textura_a, a_d, mem_size_A);
cudaBindTexture(0, textura_w, w_d, mem_size_A);
unsigned int* c_h; //solución en el host
unsigned int* c_d; //solución en el device
unsigned int mem_size_C= mem_size_V-sizeof(unsigned int); //Descontar el tapon -4
inicializar_Sol(c_h, c_d, nv, mem_size_C, infinito);
bool* f_h; //frontera en el host
bool* f_d; //frontera en el device
unsigned int mem_size_F= sizeof(bool) * nv;
inicializar_Frontera(f_h, f_d, nv, mem_size_F);
bool* p_h; //pendientes por procesar
bool* p_d; //pendientes por procesar
inicializar_Pendientes(p_h, p_d, nv, mem_size_F);
//enlazar las texturas del algoritmo
cudaBindTexture(0, textura_c, c_d, mem_size_C);
//cudaBindTexture(0, textura_p, p_d, mem_size_F);
//cudaBindTexture(0, textura_f, f_d, mem_size_F);
#ifdef DEBUG
//DEPURACION
printf("\nnv= %i\n", nv);
printf("na= %i\n", na);
printf("mem_size_V= %i\n", mem_size_V);
printf("mem_size_A= %i\n", mem_size_A);
printf("mem_size_F= %i\n\n", mem_size_F);
#endif // DEBUG
// setup execution parameters
unsigned int num_threadsInBlock= NUM_THREADS_IN_BLOCK;
//unsigned int num_blocksInGrid= nv/num_threadsInBlock;
unsigned int num_blocksInGrid = (nv + (num_threadsInBlock-1)) / num_threadsInBlock;
dim3 grid( num_blocksInGrid, 1, 1);
dim3 threads( num_threadsInBlock, 1, 1);
//RESERVAR ESPACIO PARA LA MINIMIZACION
unsigned int nvi= nv/(2*num_threadsInBlock);
unsigned int nvo= nvi/(2*num_threadsInBlock);
unsigned int* cdi;
unsigned int* cdo;
cudaMalloc((void**) &cdi, nvi*sizeof(unsigned int));
cudaMalloc((void**) &cdo, nvo*sizeof(unsigned int));
unsigned int* chi = (unsigned int*) malloc(nvi*sizeof(unsigned int));
unsigned int* cho = (unsigned int*) malloc(nvo*sizeof(unsigned int));
/* Updated timer code for CUDA 9 */
cudaEvent_t timerStart, timerStop;
float time;
// start things
cudaEventCreate(&timerStart);
cudaEventCreate(&timerStop);
cudaEventRecord(timerStart, 0);
//EJECUTAR VUELTAS
bool ultima= false;
unsigned int i= 0;
while(!ultima){
i++;
ultima= ejecutarIteracion_SSSP6_tex_all( i,
grid, threads,
nv, na,
mem_size_V, mem_size_A, mem_size_C, mem_size_F,
infinito,
p_h, f_h, c_h,
p_d, f_d, c_d,
chi, cho, cdi, cdo);
}//while
// end things
cudaEventRecord(timerStop, 0);
cudaEventSynchronize(timerStop);
cudaEventElapsedTime(&time, timerStart, timerStop);
cudaEventDestroy(timerStart);
cudaEventDestroy(timerStop);
printf("%.6f", time);
copiarD2H((void*)c_h, (void*)c_d, mem_size_C);
//desenlazar las texturas
cudaUnbindTexture(textura_v);
cudaUnbindTexture(textura_a);
cudaUnbindTexture(textura_w);
// cleanup memory
cudaFree(v_d);
cudaFree(a_d);
cudaFree(w_d);
free( f_h);
free( p_h);
//desenlazar las texturas
cudaUnbindTexture(textura_c);
//cudaUnbindTexture(textura_p);
//cudaUnbindTexture(textura_f);
cudaFree(c_d);
cudaFree(f_d);
cudaFree(p_d);
free(chi);
free(cho);
cudaFree(cdi);
cudaFree(cdo);
// check result
//CUTBoolean res = cutComparei( (int*)reference, (int*)c_h, nv);
//printf( "%s\t", (1 == res) ? "OK" : "FAILED");
//mostrarUI(c_h, nv, "c_h");
//mostrarUI(reference, nv, "reference");
// cleanup memory
free(c_h);
}
#endif //#ifndef _SSSP6_Texture_All
|
7682a8ab22af99e3c1b8b9613655631656a43591.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 2
#define TW 2
#define TC 16
#define C 128
#define N 96
#define H 14
#define W 14
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(hipError_t code)
{
if (code != hipSuccess)
{
std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl;
exit(-1);
}
}
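// The kernel below appears to be machine-generated (TVM-style auto-scheduling):
// it stages a padded input slice into pad_temp_shared and a slab of 3x3 filter
// weights into kernel_shared, then each thread accumulates two output elements
// (compute_local) for a direct convolution; the fully unrolled index arithmetic
// comes from the code generator and is left as-is.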
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[2];
__shared__ float pad_temp_shared[768];
__shared__ float kernel_shared[864];
float pad_temp_shared_local[6];
float kernel_shared_local[12];
compute_local[(0)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
for (int rc_outer = 0; rc_outer < 8; ++rc_outer) {
__syncthreads();
pad_temp_shared[(((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 19) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 19) >> 4)) % 3)) < 15)) && (1 <= ((((int)threadIdx.x) * 19) & 15))) && (((((int)threadIdx.x) * 19) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 19) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 19) >> 4)) % 3) * 14)) + ((((int)threadIdx.x) * 19) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 1))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 1) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 1) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 1) & 15))) && ((((((int)threadIdx.x) * 19) + 1) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 1) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 1) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 1) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 2))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 2) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 2) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 2) & 15))) && ((((((int)threadIdx.x) * 19) + 2) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 2) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 2) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 2) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 3))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 3) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 3) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 3) & 15))) && ((((((int)threadIdx.x) * 19) + 3) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 3) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 3) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 3) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 4))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 4) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 4) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 4) & 15))) && ((((((int)threadIdx.x) * 19) + 4) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 4) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 4) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 4) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 5))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 5) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 5) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 5) & 15))) && ((((((int)threadIdx.x) * 19) + 5) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 5) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 5) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 5) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 6))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 6) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 6) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 6) & 15))) && ((((((int)threadIdx.x) * 19) + 6) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 6) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 6) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 6) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 7))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 7) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 7) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 7) & 15))) && ((((((int)threadIdx.x) * 19) + 7) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 7) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 7) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 7) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 8))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 8) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 8) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 8) & 15))) && ((((((int)threadIdx.x) * 19) + 8) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 8) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 8) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 8) & 15)) - 15))] : 0.000000e+00f);
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 9) >> 4)) < 48) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 759) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 9))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 9) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 9) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 9) & 15))) && ((((((int)threadIdx.x) * 19) + 9) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 9) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 9) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 9) & 15)) - 15))] : 0.000000e+00f);
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 10) >> 4)) < 48) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 758) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 10))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 10) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 10) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 10) & 15))) && ((((((int)threadIdx.x) * 19) + 10) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 10) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 10) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 10) & 15)) - 15))] : 0.000000e+00f);
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 11) >> 4)) < 48) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 757) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 11))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 11) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 11) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 11) & 15))) && ((((((int)threadIdx.x) * 19) + 11) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 11) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 11) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 11) & 15)) - 15))] : 0.000000e+00f);
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 12) >> 4)) < 48) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 756) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 12))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 12) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 12) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 12) & 15))) && ((((((int)threadIdx.x) * 19) + 12) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 12) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 12) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 12) & 15)) - 15))] : 0.000000e+00f);
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 13) >> 4)) < 48) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 755) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 13))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 13) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 13) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 13) & 15))) && ((((((int)threadIdx.x) * 19) + 13) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 13) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 13) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 13) & 15)) - 15))] : 0.000000e+00f);
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 14) >> 4)) < 48) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 754) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 14))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 14) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 14) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 14) & 15))) && ((((((int)threadIdx.x) * 19) + 14) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 14) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 14) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 14) & 15)) - 15))] : 0.000000e+00f);
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 15) >> 4)) < 48) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 753) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 15))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 15) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 15) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 15) & 15))) && ((((((int)threadIdx.x) * 19) + 15) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 15) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 15) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 15) & 15)) - 15))] : 0.000000e+00f);
}
}
}
if (((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 19) >> 4)) < 47) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 752) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 16))] = (((((1 <= (((int)blockIdx.y) + ((((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 19) >> 4)) + 1) % 3))) && ((((int)blockIdx.y) + ((((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 19) >> 4)) + 1) % 3)) < 15)) && (1 <= ((((int)threadIdx.x) * 19) & 15))) && (((((int)threadIdx.x) * 19) & 15) < 15)) ? data[(((((((rc_outer * 3136) + (((((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 19) >> 4)) + 1) / 3) * 196)) + (((int)blockIdx.y) * 14)) + (((((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 19) >> 4)) + 1) % 3) * 14)) + ((((int)threadIdx.x) * 19) & 15)) - 15))] : 0.000000e+00f);
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 17) >> 4)) < 48) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 751) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 17))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 17) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 17) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 1) & 15))) && ((((((int)threadIdx.x) * 19) + 1) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 17) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 17) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 1) & 15)) - 15))] : 0.000000e+00f);
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 18) >> 4)) < 48) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 750) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 18))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 18) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 18) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 2) & 15))) && ((((((int)threadIdx.x) * 19) + 2) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 18) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 18) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 2) & 15)) - 15))] : 0.000000e+00f);
}
}
}
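    // Stage the filter weights for this block's 6 output channels and the current
    // 16-input-channel chunk (rc_outer) into kernel_shared; each threadIdx.z loads
    // the 288 values belonging to its two output channels.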
kernel_shared[(((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)))] = kernel[((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + (((((int)threadIdx.x) * 7) / 48) * 1152)) + (rc_outer * 144)) + (((((int)threadIdx.x) * 7) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 1))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + (((((int)threadIdx.x) * 7) / 48) * 1152)) + (rc_outer * 144)) + (((((int)threadIdx.x) * 7) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 2))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + (((((int)threadIdx.x) * 7) / 48) * 1152)) + (rc_outer * 144)) + (((((int)threadIdx.x) * 7) % 48) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 3))] = kernel[((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 1) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 4))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 1) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 5))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 1) % 48) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 6))] = kernel[((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 2) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 7))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 2) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 8))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 2) % 48) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 9))] = kernel[((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 3) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 10))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 3) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 11))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 3) % 48) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 12))] = kernel[((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 4) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 13))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 4) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 14))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 4) % 48) * 3)) + 2))];
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 6) {
if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 96) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 283) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 849) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 15))] = kernel[((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 5) % 48) * 3)))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 6) {
if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 96) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 283) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 848) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 16))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 5) % 48) * 3)) + 1))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 6) {
if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 96) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 283) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 847) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 17))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 5) % 48) * 3)) + 2))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 6) {
if (((((int)threadIdx.z) * 32) + ((((int)threadIdx.x) * 7) / 3)) < 94) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 282) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 846) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 18))] = kernel[((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 6) % 48) * 3)))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 6) {
if (((((int)threadIdx.z) * 32) + ((((int)threadIdx.x) * 7) / 3)) < 94) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 282) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 845) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 19))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 6) % 48) * 3)) + 1))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 6) {
if (((((int)threadIdx.z) * 32) + ((((int)threadIdx.x) * 7) / 3)) < 94) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 282) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 844) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 20))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 6) % 48) * 3)) + 2))];
}
}
}
}
}
__syncthreads();
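    // Accumulate the 3x3 stencil for this chunk: rc_inner_outer walks the 16 staged
    // input channels two at a time, and each thread keeps two output channels in
    // compute_local[0]/[1] (their filters sit 432 floats apart in kernel_shared).
    // The three unrolled blocks below correspond to the three filter rows.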
for (int rc_inner_outer = 0; rc_inner_outer < 8; ++rc_inner_outer) {
pad_temp_shared_local[(0)] = pad_temp_shared[(((rc_inner_outer * 96) + ((int)threadIdx.x)))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 1))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 2))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 48))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 49))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 50))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 432))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 1))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 433))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 2))];
kernel_shared_local[(8)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 434))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 9))];
kernel_shared_local[(9)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 441))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 10))];
kernel_shared_local[(10)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 442))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 11))];
kernel_shared_local[(11)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 443))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(9)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(10)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(11)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 16))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 17))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 18))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 64))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 65))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 66))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 3))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 435))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 4))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 436))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 5))];
kernel_shared_local[(8)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 437))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 12))];
kernel_shared_local[(9)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 444))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 13))];
kernel_shared_local[(10)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 445))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 14))];
kernel_shared_local[(11)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 446))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(9)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(10)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(11)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 32))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 33))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 34))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 80))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 81))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 82))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 6))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 438))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 7))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 439))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 8))];
kernel_shared_local[(8)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 440))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 15))];
kernel_shared_local[(9)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 447))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 16))];
kernel_shared_local[(10)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 448))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 17))];
kernel_shared_local[(11)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 449))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(9)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(10)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(11)]));
}
}
compute[(((((((int)blockIdx.z) * 1176) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 14)) + ((int)threadIdx.x)))] = compute_local[(0)];
compute[((((((((int)blockIdx.z) * 1176) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 14)) + ((int)threadIdx.x)) + 588))] = compute_local[(1)];
}
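// cuDNN baseline wrappers. Each class owns its own handle, descriptors, workspace and
// output buffer for a 3x3, pad-1, stride-1 cross-correlation on a 1 x C x H x W input,
// and the three classes differ only in the forward algorithm they request:
// implicit GEMM, non-fused Winograd, or FFT.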
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
    unsigned int kernelSize = R*S*C*N; // total number of filter weights; the host copy is filled with 1.0f below
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
    unsigned int kernelSize = R*S*C*N; // total number of filter weights; the host copy is filled with 1.0f below
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
    unsigned int kernelSize = R*S*C*N; // total number of filter weights; the host copy is filled with 1.0f below
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
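// Stages a TC-channel slice of the input tile into zero-padded shared memory.
// Each warp (warp_id) copies one channel at a time, striding by TWS warps; lanes copy
// row-major pixels. h_offset == 1 keeps the top padding row zeroed (tile at the image
// top); h_offset == 0 loads the halo row starting at h_start into shared row 0.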
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
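// Writes a thread's TH x TW partial results to global memory with atomicAdd (partial
// sums from different channel tiles land in the same output), clamping the written
// extent (write_h, write_w) for tiles that overhang the bottom/right image edge.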
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
case 2:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
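// Hand-written tiled direct convolution. Each block handles one (channel-tile, row-tile)
// pair of the input; each thread computes a TH x TW output patch for a single output
// channel n, accumulating the 3x3 stencil over the TC input channels staged in shared
// memory, then flushes the patch via switch_write_back.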
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
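// Sum of absolute element-wise differences, used to sanity-check the custom kernel
// against the cuDNN reference output.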
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
        diff += fabs(x[i] - y[i]); // fabs: avoid accidentally resolving to the integer abs overload
}
return diff;
}
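// Benchmark driver: runs the three cuDNN algorithms, the TVM-generated kernel and the
// hand-written conv2d kernel on the same random input with all-ones filters, times each
// with hip events, and appends the results plus speedup ratios to a CSV file.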
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
hipMalloc(&device_input,C*H*W*sizeof(float));
hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
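    // Untimed first passes through each cuDNN algorithm; the implicit-GEMM result is
    // copied back to the host as the correctness reference for check_diff.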
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
hipEvent_t event_start;
hipEvent_t event_stop;
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
hipMalloc(&device_out,H*W*N*sizeof(float));
hipMemset(device_out,0,H*W*N*sizeof(float));
hipMalloc(&device_K,C*N*9*sizeof(float));
hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
hipEventRecord(event_start);
convGemm.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnGemmTime;
hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
hipEventRecord(event_start);
convWinogradeNon.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
hipEventRecord(event_start);
convFFT.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnFFTTime;
hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
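    // Launch geometry required by the TVM-generated kernel: each block produces one
    // 14-pixel output row for 6 of the 96 output channels (2 channels per z-thread).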
dim3 grid(1,14,16);
dim3 block(14,1,3);
hipEventRecord(event_start);
hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tvm;
hipEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
hipMemcpy(out_tvm,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
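    // The hand-written kernel accumulates into device_out with atomicAdd across channel
    // tiles, so zero the buffer and raise its dynamic shared-memory limit before launch.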
hipMemset(device_out, 0, sizeof(float)*N*H*W);
    chkerr(hipFuncSetAttribute(reinterpret_cast<const void*>(conv2d), hipFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
hipEventRecord(event_start);
hipLaunchKernelGGL(( conv2d), dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tdc;
hipEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
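    // Append N,C,H,W, the three cuDNN times, the TVM and hand-written kernel times, and
    // the hand-written kernel's speedup over each baseline to the evaluation CSV.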
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/2080Ti-layers-eval-modeling.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<
cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
| 7682a8ab22af99e3c1b8b9613655631656a43591.cu | #include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
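// Problem size (C input channels, N output channels, H x W spatial, R x S filter) and
// tiling parameters for the hand-written kernel: each thread computes a TH x TW output
// tile, each block covers TC input channels, and WPAD is the padded shared-memory row width.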
#define TH 2
#define TW 2
#define TC 16
#define C 128
#define N 96
#define H 14
#define W 14
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
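// Error-checking helpers: checkCUDNN aborts on a non-success cuDNN status,
// chkerr aborts on a CUDA runtime error.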
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(cudaError_t code)
{
if (code != cudaSuccess)
{
std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl;
exit(-1);
}
}
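// TVM-generated direct convolution kernel (timed as time_tvm in the benchmark); the
// fully unrolled shared-memory staging below is emitted by the TVM code generator.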
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[2];
__shared__ float pad_temp_shared[768];
__shared__ float kernel_shared[864];
float pad_temp_shared_local[6];
float kernel_shared_local[12];
compute_local[(0)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
for (int rc_outer = 0; rc_outer < 8; ++rc_outer) {
__syncthreads();
pad_temp_shared[(((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 19) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 19) >> 4)) % 3)) < 15)) && (1 <= ((((int)threadIdx.x) * 19) & 15))) && (((((int)threadIdx.x) * 19) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 19) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 19) >> 4)) % 3) * 14)) + ((((int)threadIdx.x) * 19) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 1))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 1) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 1) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 1) & 15))) && ((((((int)threadIdx.x) * 19) + 1) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 1) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 1) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 1) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 2))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 2) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 2) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 2) & 15))) && ((((((int)threadIdx.x) * 19) + 2) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 2) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 2) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 2) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 3))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 3) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 3) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 3) & 15))) && ((((((int)threadIdx.x) * 19) + 3) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 3) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 3) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 3) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 4))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 4) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 4) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 4) & 15))) && ((((((int)threadIdx.x) * 19) + 4) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 4) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 4) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 4) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 5))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 5) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 5) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 5) & 15))) && ((((((int)threadIdx.x) * 19) + 5) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 5) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 5) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 5) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 6))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 6) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 6) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 6) & 15))) && ((((((int)threadIdx.x) * 19) + 6) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 6) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 6) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 6) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 7))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 7) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 7) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 7) & 15))) && ((((((int)threadIdx.x) * 19) + 7) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 7) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 7) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 7) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 8))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 8) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 8) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 8) & 15))) && ((((((int)threadIdx.x) * 19) + 8) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 8) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 8) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 8) & 15)) - 15))] : 0.000000e+00f);
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 9) >> 4)) < 48) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 759) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 9))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 9) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 9) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 9) & 15))) && ((((((int)threadIdx.x) * 19) + 9) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 9) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 9) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 9) & 15)) - 15))] : 0.000000e+00f);
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 10) >> 4)) < 48) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 758) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 10))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 10) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 10) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 10) & 15))) && ((((((int)threadIdx.x) * 19) + 10) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 10) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 10) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 10) & 15)) - 15))] : 0.000000e+00f);
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 11) >> 4)) < 48) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 757) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 11))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 11) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 11) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 11) & 15))) && ((((((int)threadIdx.x) * 19) + 11) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 11) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 11) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 11) & 15)) - 15))] : 0.000000e+00f);
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 12) >> 4)) < 48) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 756) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 12))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 12) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 12) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 12) & 15))) && ((((((int)threadIdx.x) * 19) + 12) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 12) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 12) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 12) & 15)) - 15))] : 0.000000e+00f);
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 13) >> 4)) < 48) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 755) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 13))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 13) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 13) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 13) & 15))) && ((((((int)threadIdx.x) * 19) + 13) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 13) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 13) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 13) & 15)) - 15))] : 0.000000e+00f);
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 14) >> 4)) < 48) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 754) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 14))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 14) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 14) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 14) & 15))) && ((((((int)threadIdx.x) * 19) + 14) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 14) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 14) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 14) & 15)) - 15))] : 0.000000e+00f);
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 15) >> 4)) < 48) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 753) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 15))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 15) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 15) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 15) & 15))) && ((((((int)threadIdx.x) * 19) + 15) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 15) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 15) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 15) & 15)) - 15))] : 0.000000e+00f);
}
}
}
if (((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 19) >> 4)) < 47) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 752) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 16))] = (((((1 <= (((int)blockIdx.y) + ((((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 19) >> 4)) + 1) % 3))) && ((((int)blockIdx.y) + ((((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 19) >> 4)) + 1) % 3)) < 15)) && (1 <= ((((int)threadIdx.x) * 19) & 15))) && (((((int)threadIdx.x) * 19) & 15) < 15)) ? data[(((((((rc_outer * 3136) + (((((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 19) >> 4)) + 1) / 3) * 196)) + (((int)blockIdx.y) * 14)) + (((((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 19) >> 4)) + 1) % 3) * 14)) + ((((int)threadIdx.x) * 19) & 15)) - 15))] : 0.000000e+00f);
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 17) >> 4)) < 48) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 751) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 17))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 17) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 17) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 1) & 15))) && ((((((int)threadIdx.x) * 19) + 1) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 17) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 17) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 1) & 15)) - 15))] : 0.000000e+00f);
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 18) >> 4)) < 48) {
if (((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) < 750) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 256) + (((int)threadIdx.x) * 19)) + 18))] = (((((1 <= (((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 18) >> 4)) % 3))) && ((((int)blockIdx.y) + (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 18) >> 4)) % 3)) < 15)) && (1 <= (((((int)threadIdx.x) * 19) + 2) & 15))) && ((((((int)threadIdx.x) * 19) + 2) & 15) < 15)) ? data[(((((((rc_outer * 3136) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 18) >> 4)) / 3) * 196)) + (((int)blockIdx.y) * 14)) + ((((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 19) + 18) >> 4)) % 3) * 14)) + (((((int)threadIdx.x) * 19) + 2) & 15)) - 15))] : 0.000000e+00f);
}
}
}
kernel_shared[(((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)))] = kernel[((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + (((((int)threadIdx.x) * 7) / 48) * 1152)) + (rc_outer * 144)) + (((((int)threadIdx.x) * 7) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 1))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + (((((int)threadIdx.x) * 7) / 48) * 1152)) + (rc_outer * 144)) + (((((int)threadIdx.x) * 7) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 2))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + (((((int)threadIdx.x) * 7) / 48) * 1152)) + (rc_outer * 144)) + (((((int)threadIdx.x) * 7) % 48) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 3))] = kernel[((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 1) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 4))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 1) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 5))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 1) % 48) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 6))] = kernel[((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 2) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 7))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 2) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 8))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 2) % 48) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 9))] = kernel[((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 3) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 10))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 3) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 11))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 3) % 48) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 12))] = kernel[((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 4) % 48) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 13))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 4) % 48) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 14))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 4) % 48) * 3)) + 2))];
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 6) {
if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 96) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 283) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 849) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 15))] = kernel[((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 5) % 48) * 3)))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 6) {
if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 96) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 283) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 848) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 16))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 5) % 48) * 3)) + 1))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 6) {
if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 96) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 283) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 847) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 17))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 5) % 48) * 3)) + 2))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 6) {
if (((((int)threadIdx.z) * 32) + ((((int)threadIdx.x) * 7) / 3)) < 94) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 282) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 846) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 18))] = kernel[((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 6) % 48) * 3)))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 6) {
if (((((int)threadIdx.z) * 32) + ((((int)threadIdx.x) * 7) / 3)) < 94) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 282) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 845) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 19))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 6) % 48) * 3)) + 1))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 6) {
if (((((int)threadIdx.z) * 32) + ((((int)threadIdx.x) * 7) / 3)) < 94) {
if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 282) {
if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 844) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 20))] = kernel[(((((((((int)blockIdx.z) * 6912) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 1152)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 6) % 48) * 3)) + 2))];
}
}
}
}
}
__syncthreads();
for (int rc_inner_outer = 0; rc_inner_outer < 8; ++rc_inner_outer) {
pad_temp_shared_local[(0)] = pad_temp_shared[(((rc_inner_outer * 96) + ((int)threadIdx.x)))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 1))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 2))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 48))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 49))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 50))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 432))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 1))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 433))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 2))];
kernel_shared_local[(8)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 434))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 9))];
kernel_shared_local[(9)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 441))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 10))];
kernel_shared_local[(10)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 442))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 11))];
kernel_shared_local[(11)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 443))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(9)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(10)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(11)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 16))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 17))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 18))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 64))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 65))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 66))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 3))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 435))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 4))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 436))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 5))];
kernel_shared_local[(8)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 437))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 12))];
kernel_shared_local[(9)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 444))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 13))];
kernel_shared_local[(10)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 445))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 14))];
kernel_shared_local[(11)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 446))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(9)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(10)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(11)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 32))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 33))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 34))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 80))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 81))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 96) + ((int)threadIdx.x)) + 82))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 6))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 438))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 7))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 439))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 8))];
kernel_shared_local[(8)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 440))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 15))];
kernel_shared_local[(9)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 447))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 16))];
kernel_shared_local[(10)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 448))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 17))];
kernel_shared_local[(11)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 18)) + 449))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(9)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(10)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(11)]));
}
}
compute[(((((((int)blockIdx.z) * 1176) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 14)) + ((int)threadIdx.x)))] = compute_local[(0)];
compute[((((((((int)blockIdx.z) * 1176) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 14)) + ((int)threadIdx.x)) + 588))] = compute_local[(1)];
}
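// ConvGemm: thin cuDNN wrapper that runs the same 3x3, pad-1, stride-1 convolution with the
// IMPLICIT_GEMM algorithm; it is one of the cuDNN baselines timed against the custom kernels in main().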
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
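// load_input_2_shared_memory: cooperatively stages a (TH+2)-row, WPAD-wide padded tile of TC input
// channels into shared memory; h_offset selects whether the tile starts at the padded top row.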
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
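// switch_write_back: adds the per-thread temp_result tile into global memory with atomicAdd,
// using the write_h/write_w switch to clip partial tiles at the bottom/right image borders.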
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
case 2:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
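// conv2d: tiled direct 3x3 convolution. Each block handles one (channel-tile, row-tile) pair;
// within a block, lane_id indexes the output channel and warp_id the output-column tile, and the
// fully unrolled stencil accumulates TH x TW outputs per thread from the shared-memory tile.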
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
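// check_diff: host-side L1 distance between two output buffers (OpenMP reduction), used to
// validate the hand-written kernel against the cuDNN reference output.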
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
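// main: generates a random input, runs the three cuDNN algorithms (GEMM, non-fused Winograd, FFT),
// the TVM-generated kernel and the tiled conv2d kernel, times each with CUDA events, and appends
// the timings and speedups to a CSV file.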
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
cudaMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
cudaMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
cudaMalloc(&device_input,C*H*W*sizeof(float));
cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
cudaEvent_t event_start;
cudaEvent_t event_stop;
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
cudaMalloc(&device_out,H*W*N*sizeof(float));
cudaMemset(device_out,0,H*W*N*sizeof(float));
cudaMalloc(&device_K,C*N*9*sizeof(float));
cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice);
cudaEventRecord(event_start);
convGemm.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnGemmTime;
cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
cudaEventRecord(event_start);
convWinogradeNon.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
cudaEventRecord(event_start);
convFFT.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnFFTTime;
cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(1,14,16);
dim3 block(14,1,3);
cudaEventRecord(event_start);
default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tvm;
cudaEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
cudaMemcpy(out_tvm,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemset(device_out, 0, sizeof(float)*N*H*W);
chkerr(cudaFuncSetAttribute(conv2d,cudaFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
cudaEventRecord(event_start);
conv2d<<<TCS*THS, N * TWS, TC*(TH+2)*(WPAD)*4>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tdc;
cudaEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/2080Ti-layers-eval-modeling.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<
cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
|
fdcb5146aceb56ddacea212f085feb8930906149.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <iostream>
#include <fstream>
#define ASIZE 256
#define PRIME 1000009
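// processPattern: one thread per pattern position; builds the bad-character shift table, where
// shifts[c] is the distance from the last occurrence of c in the pattern to one past its end
// (characters absent from the pattern keep the default shift of m+1 set on the host).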
__global__ void processPattern(char* x ,int m, int shifts[]) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx >= m ) return;
char c = x[idx];
for( int i = m - 1; i >= idx; --i ) {
if ( x[i] == c ) {
shifts[c] = m - i;
return;
}
}
}
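// compare: child kernel (one thread per pattern character) launched for a candidate alignment;
// any mismatch clears results[idx] for that alignment.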
__global__ void compare(int idx,char *x, char *y, int m, int* results) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
// printf("%d\t%d\n",idx,id);
if(x[id]!=y[idx+id]) {
results[idx]=0;
}
}
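// search: one thread per text position. Only positions marked by the host-side shift scan
// (indx[idx] == idx) are verified; after a cheap first/last-character test the full check is
// delegated to the compare child kernel via dynamic parallelism.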
__global__ void search(char *x, int m, char* y, int n, int shifts[], int indx[], int results[]) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx > (n - m) ) { results[idx] = 0; return; }
if ( indx[idx] != idx ) { results[idx] = 0; return; }
if(x[0]==y[idx] && x[m-1]==y[idx+m-1]) {
/*
if(idx>1000 && idx<1100) {
hipLaunchKernelGGL(( compare), dim3(1),dim3(m), 0, 0, idx);
}
*/
if(m>2)
hipLaunchKernelGGL(( compare), dim3(1),dim3(m), 0, 0, idx,x,y,m,results);
/*
for( int i = 0; i < m; ++i ) {
if ( x[i] != y[idx + i] ) {
results[idx] = 0;
return;
}
}
*/
} else {
results[idx] = 0;
}
}
char* readfile(const char* filename) {
FILE* f;
char* data = NULL;
f= fopen(filename, "r");
if ( f != NULL ) {
fseek(f,0,SEEK_END);
int size=ftell(f);
fseek(f,0,SEEK_SET);
data = (char*)malloc((size + 1) * sizeof(char)); // +1: callers use strlen(), so the text must be NUL-terminated
fread(data, size,1,f);
data[size] = '\0';
fclose(f);
}
return data;
}
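// precomputeShiftIndx: sequential scan on the host that walks the text with the shift table and
// marks in indx[] the only alignments the search kernel has to verify.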
void precomputeShiftIndx(char* y, int n, int m, int shifts[], int indx[]) {
int j = 0;
int limit = n - m;
while (j <= limit ) {
j += shifts[ y[j + m] ];
indx[j] = j;
}
}
void display_results(int n, int res[]) {
int j=0;
for( int i =0; i < n; ++i )
if ( res[i] == 1 ) {
j++;
// printf("%d\n",i);
}
// printf("%d\n",j);
}
int main(int argc, char* argv[]) {
int cuda_device = 0;
size_t n = 0;
size_t m = 0;
if ( argc < 4 ) {
// printf("Usage: ./a.out <device number> <pattern> <data file>\n");
return -1;
}
if( argc > 1 )
cuda_device = atoi( argv[1] );
char* mainString = readfile(argv[3]);
char* subString = (char*) malloc( (strlen(argv[2]) + 1) * sizeof(char) ); // +1 for the terminating NUL written by strcpy
strcpy(subString, argv[2]);
n = strlen(mainString);
m = strlen(subString);
int* results=(int*)malloc(n * sizeof(int));
int* l_shifts = (int*)malloc( ASIZE * sizeof(int) );
for( int i = 0; i < ASIZE; ++i )
l_shifts[i] = m + 1;
int* l_indx = (int*) malloc( n * sizeof(int) );
for( int i = 0; i < n; ++i ) {
l_indx[i] = -1;
results[i]=1;
}
l_indx[0]=0;
// hipError_t error;
hipEvent_t start_event, stop_event;
float time1, time2;
checkCudaErrors( hipEventCreate(&start_event) );
checkCudaErrors( hipEventCreate(&stop_event) );
int num_devices=0;
checkCudaErrors( hipGetDeviceCount(&num_devices) );
if(0==num_devices)
{
// printf("Your system does not have a CUDA capable device\n");
return 1;
}
/*
if( cuda_device >= num_devices )
{
if(num_devices==0)
// printf("You have only 1 device and it's id is 0\n");
else
// printf("choose device ID between 0 and %d\n", num_devices-1);
return 1;
}
*/
hipDeviceProp_t deviceProp;
checkCudaErrors( hipGetDeviceProperties(&deviceProp, cuda_device) );
// if( (1 == deviceProp.major) && (deviceProp.minor < 1))
// printf("%s does not have compute capability 1.1 or later\n", deviceProp.name);
// printf("Device name : %s\n", deviceProp.name );
// printf("CUDA Capable SM %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
// printf("array_size = %zd\n", n);
char* d_substr = 0;
int* d_shifts = 0;
int* d_indx = 0;
char* d_text = 0;
int* d_results = 0;
checkCudaErrors( hipMalloc((void**)&d_shifts, sizeof(int)*ASIZE));
checkCudaErrors( hipMalloc((void**)&d_indx, n * sizeof(int)) );
checkCudaErrors( hipMalloc((void**)&d_results, n * sizeof(int)) );
checkCudaErrors( hipMalloc((void**)&d_substr, (m)*sizeof(char)) );
checkCudaErrors( hipMalloc((void**)&d_text, (strlen(mainString))*sizeof(char)) );
checkCudaErrors( hipMemcpy(d_shifts, l_shifts, sizeof(int) * ASIZE, hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy(d_results, results, sizeof(int) * n, hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy(d_text, mainString, sizeof(char)*(strlen(mainString)), hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy(d_substr, subString, sizeof(char)*(m), hipMemcpyHostToDevice) );
// error = hipGetLastError();
// printf("%s\n", hipGetErrorString(error));
dim3 threadsPerBlocks(ASIZE, 1);
int t = m / threadsPerBlocks.x;
int t1 = m % threadsPerBlocks.x;
if ( t1 != 0 ) t += 1;
dim3 numBlocks(t,1);
// printf("Launching kernel with blocks=%d, threadsperblock=%d\n", numBlocks.x, threadsPerBlocks.x);
hipEventRecord(start_event, 0);
hipLaunchKernelGGL(( processPattern), dim3(numBlocks),dim3(threadsPerBlocks), 0, 0, d_substr, m, d_shifts);
hipDeviceSynchronize();
hipEventRecord(stop_event, 0);
hipEventSynchronize( stop_event );
hipEventElapsedTime( &time1, start_event, stop_event );
checkCudaErrors( hipMemcpy(l_shifts, d_shifts, sizeof(int) * ASIZE, hipMemcpyDeviceToHost ) );
precomputeShiftIndx(mainString , n, m, l_shifts, l_indx);
checkCudaErrors( hipMemcpy(d_indx, l_indx, n * sizeof(int), hipMemcpyHostToDevice) );
/*
// For debugging
for( int i = 0; i < ASIZE; ++i )
printf("%d\t",l_shifts[i]);
printf("\n\n");
for( int i = 0; i < n; ++i )
printf("%d\t",l_indx[i]);
printf("\n\n");
printf("%zd\t%zd",n,m);
printf("\n\n");
*/
t = n / threadsPerBlocks.x;
t1 = n % threadsPerBlocks.x;
if ( t1 != 0 ) t += 1;
dim3 numBlocks2(t, 1);
// printf("Launching kernel with blocks=%d, threadsperblock=%d\n", numBlocks2.x, threadsPerBlocks.x);
hipEventRecord(start_event, 0);
hipLaunchKernelGGL(( search), dim3(numBlocks2),dim3(threadsPerBlocks), 0, 0, d_substr, m, d_text, n, d_shifts, d_indx, d_results);
hipDeviceSynchronize();
hipEventRecord(stop_event, 0);
hipEventSynchronize( stop_event );
hipEventElapsedTime( &time2, start_event, stop_event );
hipEventDestroy( start_event );
hipEventDestroy( stop_event );
// printf("%f+%f=%f milliseconds\t",time1, time2, time1+time2);
printf("%f\t",time1+time2);
checkCudaErrors( hipMemcpy(results, d_results, n * sizeof(int), hipMemcpyDeviceToHost) );
display_results(n, results);
hipFree(d_substr);
hipFree(d_shifts);
hipFree(d_indx);
hipFree(d_text);
hipFree(d_results);
// free(mainString);
free(subString);
free(l_indx);
free(l_shifts);
free(results);
hipDeviceReset();
}
| fdcb5146aceb56ddacea212f085feb8930906149.cu | #include <cuda.h>
#include <helper_cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <iostream>
#include <fstream>
#define ASIZE 256
#define PRIME 1000009
__global__ void processPattern(char* x ,int m, int shifts[]) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx >= m ) return;
char c = x[idx];
for( int i = m - 1; i >= idx; --i ) {
if ( x[i] == c ) {
shifts[c] = m - i;
return;
}
}
}
__global__ void compare(int idx,char *x, char *y, int m, int* results) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
// printf("%d\t%d\n",idx,id);
if(x[id]!=y[idx+id]) {
results[idx]=0;
}
}
__global__ void search(char *x, int m, char* y, int n, int shifts[], int indx[], int results[]) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx > (n - m) ) { results[idx] = 0; return; }
if ( indx[idx] != idx ) { results[idx] = 0; return; }
if(x[0]==y[idx] && x[m-1]==y[idx+m-1]) {
/*
if(idx>1000 && idx<1100) {
compare<<<1,m>>>(idx);
}
*/
if(m>2)
compare<<<1,m>>>(idx,x,y,m,results);
/*
for( int i = 0; i < m; ++i ) {
if ( x[i] != y[idx + i] ) {
results[idx] = 0;
return;
}
}
*/
} else {
results[idx] = 0;
}
}
char* readfile(const char* filename) {
FILE* f;
char* data = NULL;
f= fopen(filename, "r");
if ( f != NULL ) {
fseek(f,0,SEEK_END);
int size=ftell(f);
fseek(f,0,SEEK_SET);
data = (char*)malloc((size + 1) * sizeof(char)); // +1: callers use strlen(), so the text must be NUL-terminated
fread(data, size,1,f);
data[size] = '\0';
fclose(f);
}
return data;
}
void precomputeShiftIndx(char* y, int n, int m, int shifts[], int indx[]) {
int j = 0;
int limit = n - m;
while (j <= limit ) {
j += shifts[ y[j + m] ];
indx[j] = j;
}
}
void display_results(int n, int res[]) {
int j=0;
for( int i =0; i < n; ++i )
if ( res[i] == 1 ) {
j++;
// printf("%d\n",i);
}
// printf("%d\n",j);
}
int main(int argc, char* argv[]) {
int cuda_device = 0;
size_t n = 0;
size_t m = 0;
if ( argc < 4 ) {
// printf("Usage: ./a.out <device number> <pattern> <data file>\n");
return -1;
}
if( argc > 1 )
cuda_device = atoi( argv[1] );
char* mainString = readfile(argv[3]);
char* subString = (char*) malloc( (strlen(argv[2]) + 1) * sizeof(char) ); // +1 for the terminating NUL written by strcpy
strcpy(subString, argv[2]);
n = strlen(mainString);
m = strlen(subString);
int* results=(int*)malloc(n * sizeof(int));
int* l_shifts = (int*)malloc( ASIZE * sizeof(int) );
for( int i = 0; i < ASIZE; ++i )
l_shifts[i] = m + 1;
int* l_indx = (int*) malloc( n * sizeof(int) );
for( int i = 0; i < n; ++i ) {
l_indx[i] = -1;
results[i]=1;
}
l_indx[0]=0;
// cudaError_t error;
cudaEvent_t start_event, stop_event;
float time1, time2;
checkCudaErrors( cudaEventCreate(&start_event) );
checkCudaErrors( cudaEventCreate(&stop_event) );
int num_devices=0;
checkCudaErrors( cudaGetDeviceCount(&num_devices) );
if(0==num_devices)
{
// printf("Your system does not have a CUDA capable device\n");
return 1;
}
/*
if( cuda_device >= num_devices )
{
if(num_devices==0)
// printf("You have only 1 device and it's id is 0\n");
else
// printf("choose device ID between 0 and %d\n", num_devices-1);
return 1;
}
*/
cudaDeviceProp deviceProp;
checkCudaErrors( cudaGetDeviceProperties(&deviceProp, cuda_device) );
// if( (1 == deviceProp.major) && (deviceProp.minor < 1))
// printf("%s does not have compute capability 1.1 or later\n", deviceProp.name);
// printf("Device name : %s\n", deviceProp.name );
// printf("CUDA Capable SM %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
// printf("array_size = %zd\n", n);
char* d_substr = 0;
int* d_shifts = 0;
int* d_indx = 0;
char* d_text = 0;
int* d_results = 0;
checkCudaErrors( cudaMalloc((void**)&d_shifts, sizeof(int)*ASIZE));
checkCudaErrors( cudaMalloc((void**)&d_indx, n * sizeof(int)) );
checkCudaErrors( cudaMalloc((void**)&d_results, n * sizeof(int)) );
checkCudaErrors( cudaMalloc((void**)&d_substr, (m)*sizeof(char)) );
checkCudaErrors( cudaMalloc((void**)&d_text, (strlen(mainString))*sizeof(char)) );
checkCudaErrors( cudaMemcpy(d_shifts, l_shifts, sizeof(int) * ASIZE, cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy(d_results, results, sizeof(int) * n, cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy(d_text, mainString, sizeof(char)*(strlen(mainString)), cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy(d_substr, subString, sizeof(char)*(m), cudaMemcpyHostToDevice) );
// error = cudaGetLastError();
// printf("%s\n", cudaGetErrorString(error));
dim3 threadsPerBlocks(ASIZE, 1);
int t = m / threadsPerBlocks.x;
int t1 = m % threadsPerBlocks.x;
if ( t1 != 0 ) t += 1;
dim3 numBlocks(t,1);
// printf("Launching kernel with blocks=%d, threadsperblock=%d\n", numBlocks.x, threadsPerBlocks.x);
cudaEventRecord(start_event, 0);
processPattern<<<numBlocks,threadsPerBlocks>>>(d_substr, m, d_shifts);
cudaThreadSynchronize();
cudaEventRecord(stop_event, 0);
cudaEventSynchronize( stop_event );
cudaEventElapsedTime( &time1, start_event, stop_event );
checkCudaErrors( cudaMemcpy(l_shifts, d_shifts, sizeof(int) * ASIZE, cudaMemcpyDeviceToHost ) );
precomputeShiftIndx(mainString , n, m, l_shifts, l_indx);
checkCudaErrors( cudaMemcpy(d_indx, l_indx, n * sizeof(int), cudaMemcpyHostToDevice) );
/*
// For debugging
for( int i = 0; i < ASIZE; ++i )
printf("%d\t",l_shifts[i]);
printf("\n\n");
for( int i = 0; i < n; ++i )
printf("%d\t",l_indx[i]);
printf("\n\n");
printf("%zd\t%zd",n,m);
printf("\n\n");
*/
t = n / threadsPerBlocks.x;
t1 = n % threadsPerBlocks.x;
if ( t1 != 0 ) t += 1;
dim3 numBlocks2(t, 1);
// printf("Launching kernel with blocks=%d, threadsperblock=%d\n", numBlocks2.x, threadsPerBlocks.x);
cudaEventRecord(start_event, 0);
search<<<numBlocks2,threadsPerBlocks>>>(d_substr, m, d_text, n, d_shifts, d_indx, d_results);
cudaThreadSynchronize();
cudaEventRecord(stop_event, 0);
cudaEventSynchronize( stop_event );
cudaEventElapsedTime( &time2, start_event, stop_event );
cudaEventDestroy( start_event );
cudaEventDestroy( stop_event );
// printf("%f+%f=%f milliseconds\t",time1, time2, time1+time2);
printf("%f\t",time1+time2);
checkCudaErrors( cudaMemcpy(results, d_results, n * sizeof(int), cudaMemcpyDeviceToHost) );
display_results(n, results);
cudaFree(d_substr);
cudaFree(d_shifts);
cudaFree(d_indx);
cudaFree(d_text);
cudaFree(d_results);
// free(mainString);
free(subString);
free(l_indx);
free(l_shifts);
free(results);
cudaThreadExit();
}
|
9230f9df66e04873b50fc81b0a67013b2419b16d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "basic_kernels_cuda.cuh"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
__global__ void DFSPH_setVector3dBufferToZero_kernel(Vector3d* buff, unsigned int buff_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= buff_size) { return; }
buff[i] = Vector3d(0, 0, 0);
}
template<class T> __global__ void cuda_setBufferToValue_kernel(T* buff, T value, unsigned int buff_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= buff_size) { return; }
buff[i] = value;
}
template __global__ void cuda_setBufferToValue_kernel<Vector3d>(Vector3d* buff, Vector3d value, unsigned int buff_size);
template __global__ void cuda_setBufferToValue_kernel<int>(int* buff, int value, unsigned int buff_size);
template __global__ void cuda_setBufferToValue_kernel<RealCuda>(RealCuda* buff, RealCuda value, unsigned int buff_size);
__global__ void DFSPH_Histogram_kernel(unsigned int* in, unsigned int* out, unsigned int num_particles) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= num_particles) { return; }
atomicAdd(&(out[in[i]]), 1);
}
__global__ void DFSPH_setBufferValueToItself_kernel(unsigned int* buff, unsigned int buff_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= buff_size) { return; }
buff[i] = i;
}
__global__ void apply_delta_to_buffer_kernel(Vector3d* buffer, Vector3d delta, const unsigned int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= size) { return; }
buffer[i] += delta;
}
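// fillRandom_kernel: single-thread kernel (only thread 0 runs); draws nbElements values from the
// given hiprand state, scales each by (max - min), offsets by min and writes them into buff,
// then saves the advanced RNG state back.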
template<class T>
__global__ void fillRandom_kernel(unsigned int *buff, unsigned int nbElements, T min, T max, hiprandState_t *state) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= 1) { return; }
hiprandState_t localState = *state;
for (int j = 0; j < nbElements; ++j) {
T x = hiprand(&localState);
x *= (max - min);
x += min;
buff[j] = x; // index with j: this single thread fills the whole buffer (buff[i] would only overwrite element 0)
}
*state = localState;
} | 9230f9df66e04873b50fc81b0a67013b2419b16d.cu | #include "basic_kernels_cuda.cuh"
#include <curand.h>
#include <curand_kernel.h>
__global__ void DFSPH_setVector3dBufferToZero_kernel(Vector3d* buff, unsigned int buff_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= buff_size) { return; }
buff[i] = Vector3d(0, 0, 0);
}
template<class T> __global__ void cuda_setBufferToValue_kernel(T* buff, T value, unsigned int buff_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= buff_size) { return; }
buff[i] = value;
}
template __global__ void cuda_setBufferToValue_kernel<Vector3d>(Vector3d* buff, Vector3d value, unsigned int buff_size);
template __global__ void cuda_setBufferToValue_kernel<int>(int* buff, int value, unsigned int buff_size);
template __global__ void cuda_setBufferToValue_kernel<RealCuda>(RealCuda* buff, RealCuda value, unsigned int buff_size);
__global__ void DFSPH_Histogram_kernel(unsigned int* in, unsigned int* out, unsigned int num_particles) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= num_particles) { return; }
atomicAdd(&(out[in[i]]), 1);
}
__global__ void DFSPH_setBufferValueToItself_kernel(unsigned int* buff, unsigned int buff_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= buff_size) { return; }
buff[i] = i;
}
__global__ void apply_delta_to_buffer_kernel(Vector3d* buffer, Vector3d delta, const unsigned int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= size) { return; }
buffer[i] += delta;
}
template<class T>
__global__ void fillRandom_kernel(unsigned int *buff, unsigned int nbElements, T min, T max, curandState *state) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= 1) { return; }
curandState localState = *state;
for (int j = 0; j < nbElements; ++j) {
T x = curand(&localState);
x *= (max - min);
x += min;
buff[j] = x; // index with j: this single thread fills the whole buffer (buff[i] would only overwrite element 0)
}
*state = localState;
} |
12a2ea06b1cb8d2596b66eb757c5c4df51b854ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include "CUDA_ActivationFunctions.h"
namespace NN {
namespace CUDA {
namespace AF {
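// Element-wise activation kernels. Each uses a grid-stride loop over the layer; the *_compute
// kernels evaluate the function, while the *_derivative kernels accumulate the gradient w.r.t.
// the input from the stored output (or input) and the incoming output gradient.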
__global__ void sigmoid_compute(float* input, float* output, int layer_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
output[i] = 1.0f / (1.0f + exp(-input[i]));
}
}
__global__ void sigmoid_derivative(float* input_derivative,
float* output, float* output_derivative,
int layer_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
input_derivative[i] += output[i]*(1.0f - output[i]) * output_derivative[i];
}
}
//-----------------------------------------------------------------------------
__global__ void tanh_compute(float* input, float* output, int layer_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
output[i] = tanh(input[i]);
}
}
__global__ void tanh_derivative(float* input_derivative,
float* output, float* output_derivative,
int layer_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
input_derivative[i] += (1.0f - output[i]*output[i]) * output_derivative[i];
}
}
//-----------------------------------------------------------------------------
__global__ void relu_compute(float* input, float* output, int layer_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
output[i] = (input[i] > 0.0f) ? input[i] : 0.0f;
}
}
__global__ void relu_derivative(float* input, float* input_derivative,
float* output_derivative,
int layer_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
input_derivative[i] += ((input[i] > 0.0f) ? 1.0f : 0.0f) * output_derivative[i];
}
}
//-----------------------------------------------------------------------------
__global__ void leaky_relu_compute(float* input, float* output, int layer_size, float alpha) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
output[i] = (input[i] > 0.0f) ? input[i] : alpha*input[i];
}
}
__global__ void leaky_relu_derivative(float* input, float* input_derivative,
float* output_derivative,
int layer_size, float alpha) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
input_derivative[i] += ((input[i] > 0.0f) ? 1.0f : alpha) * output_derivative[i];
}
}
//-----------------------------------------------------------------------------
__global__ void sin_compute(float* input, float* output, int layer_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
output[i] = sin(input[i]);
}
}
__global__ void sin_derivative(float* input, float* input_derivative,
float* output_derivative,
int layer_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
input_derivative[i] += cos(input[i]) * output_derivative[i];
}
}
}
}
} | 12a2ea06b1cb8d2596b66eb757c5c4df51b854ac.cu | #include <math.h>
#include "CUDA_ActivationFunctions.h"
namespace NN {
namespace CUDA {
namespace AF {
__global__ void sigmoid_compute(float* input, float* output, int layer_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
output[i] = 1.0f / (1.0f + exp(-input[i]));
}
}
__global__ void sigmoid_derivative(float* input_derivative,
float* output, float* output_derivative,
int layer_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
input_derivative[i] += output[i]*(1.0f - output[i]) * output_derivative[i];
}
}
//-----------------------------------------------------------------------------
__global__ void tanh_compute(float* input, float* output, int layer_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
output[i] = tanh(input[i]);
}
}
__global__ void tanh_derivative(float* input_derivative,
float* output, float* output_derivative,
int layer_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
input_derivative[i] += (1.0f - output[i]*output[i]) * output_derivative[i];
}
}
//-----------------------------------------------------------------------------
__global__ void relu_compute(float* input, float* output, int layer_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
output[i] = (input[i] > 0.0f) ? input[i] : 0.0f;
}
}
__global__ void relu_derivative(float* input, float* input_derivative,
float* output_derivative,
int layer_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
input_derivative[i] += ((input[i] > 0.0f) ? 1.0f : 0.0f) * output_derivative[i];
}
}
//-----------------------------------------------------------------------------
__global__ void leaky_relu_compute(float* input, float* output, int layer_size, float alpha) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
output[i] = (input[i] > 0.0f) ? input[i] : alpha*input[i];
}
}
__global__ void leaky_relu_derivative(float* input, float* input_derivative,
float* output_derivative,
int layer_size, float alpha) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
input_derivative[i] += ((input[i] > 0.0f) ? 1.0f : alpha) * output_derivative[i];
}
}
//-----------------------------------------------------------------------------
__global__ void sin_compute(float* input, float* output, int layer_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
output[i] = sin(input[i]);
}
}
__global__ void sin_derivative(float* input, float* input_derivative,
float* output_derivative,
int layer_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < layer_size; i += stride) {
input_derivative[i] += cos(input[i]) * output_derivative[i];
}
}
}
}
} |
a3879c2081773a3dba2043248e3bfd347d098ee1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ==================================================================
Programmer: Arunbalaji Prithiviraj (U#80066848) [email protected]
The basic SDH algorithm implementation for 3D data
To compile: nvcc proj1-80066848.cu -o output in the rc machines
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry{
//float min;
//float max;
unsigned long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * histogram; /* list of all buckets in the histogram */
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of w */
atom * atom_list; /* list of all data points */
/* These are for an old way of tracking time */
struct timezone Idunno;
struct timeval startTime, endTime;
/*
distance of two points in the atom_list
*/
double p2p_distance(int ind1, int ind2) {
double x1 = atom_list[ind1].x_pos;
double x2 = atom_list[ind2].x_pos;
double y1 = atom_list[ind1].y_pos;
double y2 = atom_list[ind2].y_pos;
double z1 = atom_list[ind1].z_pos;
double z2 = atom_list[ind2].z_pos;
return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
}
/*
brute-force SDH solution in a single CPU thread
*/
int PDH_baseline() {
int i, j, h_pos;
double dist;
for(i = 0; i < PDH_acnt; i++) {
for(j = i+1; j < PDH_acnt; j++) {
dist = p2p_distance(i,j);
h_pos = (int) (dist / PDH_res);
histogram[h_pos].d_cnt++;
}
}
return 0;
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
double report_running_time() {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
CUDA Kernel. Each Thread takes care of one element of d_atom_list
*/
__global__ void SDH (atom* atomlist,bucket* hist, int PDH_acnt, double PDH_res)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
int j,h_pos;
double dist;
double x1,x2,y1,y2,z1,z2;
if(id >= PDH_acnt) return;
for(j = id+1; j < PDH_acnt; j++)
{
x1 = atomlist[id].x_pos;
x2 = atomlist[j].x_pos;
y1 = atomlist[id].y_pos;
y2 = atomlist[j].y_pos;
z1 = atomlist[id].z_pos;
z2 = atomlist[j].z_pos;
dist = sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
h_pos = (int) (dist / PDH_res);
atomicAdd(&hist[h_pos].d_cnt,1);
}
}
/*
print the counts in all buckets of the histogram
*/
void output_histogram(bucket* output){
int i;
long long total_cnt = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", output[i].d_cnt);
total_cnt += output[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
int main(int argc, char **argv)
{
int i;
bucket* h_histogram;
/*Device variable declaration*/
atom* d_atom_list;
bucket* d_histogram;
/*command line arguments */
PDH_acnt = atoi(argv[1]);
PDH_res = atof(argv[2]);
/*Configuration parameters for the device*/
dim3 dimGrid(ceil(PDH_acnt/512)+1,1,1);
dim3 dimBlock(512,1,1);
//printf("args are %d and %f\n", PDH_acnt, PDH_res);
/*CPU memory allocation */
num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
h_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);
/*Device memory allocation */
hipMalloc(&d_atom_list,sizeof(atom)*PDH_acnt);
hipMalloc(&d_histogram,sizeof(bucket)*num_buckets);
srand(1);
/* generate data following a uniform distribution */
for(i = 0; i < PDH_acnt; i++) {
atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
}
/* Copy host array to device*/
hipMemcpy(d_atom_list,atom_list,sizeof(atom)*PDH_acnt,hipMemcpyHostToDevice);
hipMemcpy(d_histogram,histogram,sizeof(bucket)*num_buckets,hipMemcpyHostToDevice);
/* start counting time */
gettimeofday(&startTime, &Idunno);
/* call CPU single thread version to compute the histogram */
PDH_baseline();
/* check the total running time */
report_running_time();
/* print out the histogram */
output_histogram(histogram);
/* Execute the kernel */
hipLaunchKernelGGL(( SDH), dim3(dimGrid),dim3(dimBlock), 0, 0, d_atom_list,d_histogram,PDH_acnt,PDH_res);
/*Copy array back to host */
hipMemcpy(h_histogram,d_histogram,sizeof(bucket)*num_buckets,hipMemcpyDeviceToHost);
/* print out the histogram */
printf("\nGPU version:");
output_histogram(h_histogram);
/*Release Device Memory*/
hipFree(d_atom_list);
hipFree(d_histogram);
/*Release CPU Memory*/
free(atom_list);
free(histogram);
free(h_histogram);
return 0;
}
| a3879c2081773a3dba2043248e3bfd347d098ee1.cu | /* ==================================================================
Programmer: Arunbalaji Prithiviraj (U#80066848) [email protected]
The basic SDH algorithm implementation for 3D data
To compile: nvcc proj1-80066848.cu -o output in the rc machines
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry{
//float min;
//float max;
unsigned long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * histogram; /* list of all buckets in the histogram */
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of w */
atom * atom_list; /* list of all data points */
/* These are for an old way of tracking time */
struct timezone Idunno;
struct timeval startTime, endTime;
/*
distance of two points in the atom_list
*/
double p2p_distance(int ind1, int ind2) {
double x1 = atom_list[ind1].x_pos;
double x2 = atom_list[ind2].x_pos;
double y1 = atom_list[ind1].y_pos;
double y2 = atom_list[ind2].y_pos;
double z1 = atom_list[ind1].z_pos;
double z2 = atom_list[ind2].z_pos;
return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
}
/*
brute-force SDH solution in a single CPU thread
*/
int PDH_baseline() {
int i, j, h_pos;
double dist;
for(i = 0; i < PDH_acnt; i++) {
for(j = i+1; j < PDH_acnt; j++) {
dist = p2p_distance(i,j);
h_pos = (int) (dist / PDH_res);
histogram[h_pos].d_cnt++;
}
}
return 0;
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
double report_running_time() {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
CUDA Kernel. Each Thread takes care of one element of d_atom_list
*/
__global__ void SDH (atom* atomlist,bucket* hist, int PDH_acnt, double PDH_res)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
int j,h_pos;
double dist;
double x1,x2,y1,y2,z1,z2;
if(id >= PDH_acnt) return;
for(j = id+1; j < PDH_acnt; j++)
{
x1 = atomlist[id].x_pos;
x2 = atomlist[j].x_pos;
y1 = atomlist[id].y_pos;
y2 = atomlist[j].y_pos;
z1 = atomlist[id].z_pos;
z2 = atomlist[j].z_pos;
dist = sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
h_pos = (int) (dist / PDH_res);
atomicAdd(&hist[h_pos].d_cnt,1);
}
}
/*
print the counts in all buckets of the histogram
*/
void output_histogram(bucket* output){
int i;
long long total_cnt = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", output[i].d_cnt);
total_cnt += output[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
int main(int argc, char **argv)
{
int i;
bucket* h_histogram;
/*Device variable declaration*/
atom* d_atom_list;
bucket* d_histogram;
/*command line arguments */
PDH_acnt = atoi(argv[1]);
PDH_res = atof(argv[2]);
/*Configuration parameters for the device*/
dim3 dimGrid(ceil(PDH_acnt/512)+1,1,1);
dim3 dimBlock(512,1,1);
//printf("args are %d and %f\n", PDH_acnt, PDH_res);
/*CPU memory allocation */
num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
h_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);
/*Device memory allocation */
cudaMalloc(&d_atom_list,sizeof(atom)*PDH_acnt);
cudaMalloc(&d_histogram,sizeof(bucket)*num_buckets);
srand(1);
/* generate data following a uniform distribution */
for(i = 0; i < PDH_acnt; i++) {
atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
}
/* Copy host array to device*/
cudaMemcpy(d_atom_list,atom_list,sizeof(atom)*PDH_acnt,cudaMemcpyHostToDevice);
cudaMemcpy(d_histogram,histogram,sizeof(bucket)*num_buckets,cudaMemcpyHostToDevice);
/* start counting time */
gettimeofday(&startTime, &Idunno);
/* call CPU single thread version to compute the histogram */
PDH_baseline();
/* check the total running time */
report_running_time();
/* print out the histogram */
output_histogram(histogram);
/* Execute the kernel */
SDH<<<dimGrid,dimBlock>>>(d_atom_list,d_histogram,PDH_acnt,PDH_res);
/*Copy array back to host */
cudaMemcpy(h_histogram,d_histogram,sizeof(bucket)*num_buckets,cudaMemcpyDeviceToHost);
/* print out the histogram */
printf("\nGPU version:");
output_histogram(h_histogram);
/*Release Device Memory*/
cudaFree(d_atom_list);
cudaFree(d_histogram);
/*Release CPU Memory*/
free(atom_list);
free(histogram);
free(h_histogram);
return 0;
}
|
03fda912fa0ec9cfa30ff2bfc8bcfedfd48238f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "kernels.h"
#include <omp.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include <math_constants.h>
#define IDX(i, j, n) ((i) * (n) + (j))
#define UPDIV(n, d) (((n) + (d) - 1) / (d))
static dim3 threadsPerBlock(1024, 1, 1);
#ifdef DEBUG
#define cudaCheckError(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(hipError_t code, const char *file, int line,
bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr, "CUDA Error: %s at %s:%d\n",
hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#else
#define cudaCheckError(ans) ans
#endif
__global__ void parallel_floyd_warshall(int n, int k, int* W) {
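// one step of the boolean (transitive-closure) Floyd-Warshall update for a single, fixed k:
// one thread per cell of the n x n matrix, setting W[i][j] to 1 if it is already nonzero
// or if both W[i][k] and W[k][j] are nonzero. Note there is no idx < n*n guard, so the
// launch must not create more threads than matrix cells.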
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int i = (idx/n);
int j = (idx%n);
if(W[idx] != 0) {
W[idx] = 1;
}else {
if((W[i*n + k] != 0) && (W[k*n + j] != 0)){
W[idx] = 1;
}else{
W[idx] = 0;
}
}
__syncthreads();
}
__global__ void pushRelabelLockFreeKernel(int *residualFlow,
int *height, int *excessFlow, int *netFlowOutS, int *netFlowInT,
int s, int t, int n) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
int u = index;
if (u >= s) {
u++;
}
if (u >= t) {
u++;
}
// one thread here for all vertices not s or t
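// the kernel spins until the net flow pushed out of s equals the net flow arriving at t,
// both of which are updated atomically by the push steps below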
while (*netFlowOutS != *netFlowInT) {
if (u < n && excessFlow[u] > 0) {
int curExcess = excessFlow[u];
int curLowestNeighbor = -1;
int neighborMinHeight = (int) CUDART_INF;
for (int v = 0; v < n; v++) {
if (u == v) continue;
if (residualFlow[IDX(u, v, n)] > 0) {
int tempHeight = height[v];
if (tempHeight < neighborMinHeight) {
curLowestNeighbor = v;
neighborMinHeight = tempHeight;
}
}
}
if (height[u] > neighborMinHeight) {
int delta = min(curExcess, residualFlow[IDX(u, curLowestNeighbor, n)]);
atomicSub(&residualFlow[IDX(u, curLowestNeighbor, n)], delta);
atomicAdd(&residualFlow[IDX(curLowestNeighbor, u, n)], delta);
atomicSub(&excessFlow[u], delta);
atomicAdd(&excessFlow[curLowestNeighbor], delta);
if (curLowestNeighbor == s) {
atomicSub(netFlowOutS, delta);
} else if (curLowestNeighbor == t) {
atomicAdd(netFlowInT, delta);
}
} else {
height[u] = neighborMinHeight + 1;
}
}
}
}
// Push-relabel algorithm to find max s-t flow. Based on lock-free implementation
// specified by Bo Hong. Uses one CUDA thread per vertex.
Flow *pushRelabelLockFreeGPU(Graph *g, int s, int t) {
int *residualFlow;
int *height;
int *excessFlow;
int *netFlowOutS;
int *netFlowInT;
int *tempHeights = (int *)calloc(g->n, sizeof(int));
int *tempExcessFlows = (int *)calloc(g->n, sizeof(int));
int *finalFlow = (int *)malloc((g->n * g->n) * sizeof(int));
memcpy(finalFlow, g->capacities, (g->n * g->n) * sizeof(int));
cudaCheckError(hipMalloc((void **)&residualFlow, sizeof(int) * (g->n * g->n)));
cudaCheckError(hipMalloc((void **)&height, sizeof(int) * g->n));
cudaCheckError(hipMalloc((void **)&excessFlow, sizeof(int) * g->n));
cudaCheckError(hipMalloc((void **)&netFlowOutS, sizeof(int)));
cudaCheckError(hipMalloc((void **)&netFlowInT, sizeof(int)));
// initialize preflow
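// saturate every edge leaving s: its capacity moves to the reverse (residual) edge and
// becomes excess at the head vertex, while s is lifted to height n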
int flowOutS = 0;
int flowInT = 0;
tempHeights[s] = g->n;
#pragma omp parallel for reduction(+:flowOutS)
for (int v = 0; v < g->n; v++) {
int cap = g->capacities[IDX(s, v, g->n)];
if (cap > 0 && (s != v)) {
finalFlow[IDX(s, v, g->n)] = 0;
finalFlow[IDX(v, s, g->n)] += cap;
flowOutS += cap;
tempExcessFlows[v] = cap;
if (v == t) {
flowInT += cap;
}
}
}
cudaCheckError(hipMemcpy(residualFlow, finalFlow, sizeof(int) * (g->n * g->n),
hipMemcpyHostToDevice));
cudaCheckError(hipMemcpy(height, tempHeights, sizeof(int) * g->n,
hipMemcpyHostToDevice));
cudaCheckError(hipMemcpy(excessFlow, tempExcessFlows, sizeof(int) * g->n,
hipMemcpyHostToDevice));
cudaCheckError(hipMemcpy(netFlowInT, &flowInT, sizeof(int),
hipMemcpyHostToDevice));
cudaCheckError(hipMemcpy(netFlowOutS, &flowOutS, sizeof(int),
hipMemcpyHostToDevice));
int numBlocks = UPDIV((g->n - 2), threadsPerBlock.x);
hipLaunchKernelGGL(( pushRelabelLockFreeKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, residualFlow,
height, excessFlow, netFlowOutS, netFlowInT, s, t, g->n);
free(tempHeights);
free(tempExcessFlows);
cudaCheckError(hipDeviceSynchronize());
cudaCheckError(hipMemcpy(finalFlow, residualFlow, sizeof(int) * (g->n * g->n),
hipMemcpyDeviceToHost));
cudaCheckError(hipMemcpy(&flowInT, netFlowInT, sizeof(int),
hipMemcpyDeviceToHost));
// now update flow to represent actual flow
#pragma omp parallel for schedule(static)
for (int i = 0; i < (g->n * g-> n); i++) {
finalFlow[i] = g->capacities[i] - finalFlow[i];
}
Flow *result = (Flow *)malloc(sizeof(Flow));
result->maxFlow = flowInT;
result->finalEdgeFlows = finalFlow;
cudaCheckError(hipFree(residualFlow));
cudaCheckError(hipFree(height));
cudaCheckError(hipFree(excessFlow));
cudaCheckError(hipFree(netFlowOutS));
cudaCheckError(hipFree(netFlowInT));
return result;
} | 03fda912fa0ec9cfa30ff2bfc8bcfedfd48238f0.cu | #include "kernels.h"
#include <omp.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include <math_constants.h>
#define IDX(i, j, n) ((i) * (n) + (j))
#define UPDIV(n, d) (((n) + (d) - 1) / (d))
static dim3 threadsPerBlock(1024, 1, 1);
#ifdef DEBUG
#define cudaCheckError(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(cudaError_t code, const char *file, int line,
bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "CUDA Error: %s at %s:%d\n",
cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#else
#define cudaCheckError(ans) ans
#endif
__global__ void parallel_floyd_warshall(int n, int k, int* W) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int i = (idx/n);
int j = (idx%n);
if(W[idx] != 0) {
W[idx] = 1;
}else {
if((W[i*n + k] != 0) && (W[k*n + j] != 0)){
W[idx] = 1;
}else{
W[idx] = 0;
}
}
__syncthreads();
}
__global__ void pushRelabelLockFreeKernel(int *residualFlow,
int *height, int *excessFlow, int *netFlowOutS, int *netFlowInT,
int s, int t, int n) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
int u = index;
if (u >= s) {
u++;
}
if (u >= t) {
u++;
}
// one thread here for all vertices not s or t
while (*netFlowOutS != *netFlowInT) {
if (u < n && excessFlow[u] > 0) {
int curExcess = excessFlow[u];
int curLowestNeighbor = -1;
int neighborMinHeight = (int) CUDART_INF;
for (int v = 0; v < n; v++) {
if (u == v) continue;
if (residualFlow[IDX(u, v, n)] > 0) {
int tempHeight = height[v];
if (tempHeight < neighborMinHeight) {
curLowestNeighbor = v;
neighborMinHeight = tempHeight;
}
}
}
if (height[u] > neighborMinHeight) {
int delta = min(curExcess, residualFlow[IDX(u, curLowestNeighbor, n)]);
atomicSub(&residualFlow[IDX(u, curLowestNeighbor, n)], delta);
atomicAdd(&residualFlow[IDX(curLowestNeighbor, u, n)], delta);
atomicSub(&excessFlow[u], delta);
atomicAdd(&excessFlow[curLowestNeighbor], delta);
if (curLowestNeighbor == s) {
atomicSub(netFlowOutS, delta);
} else if (curLowestNeighbor == t) {
atomicAdd(netFlowInT, delta);
}
} else {
height[u] = neighborMinHeight + 1;
}
}
}
}
// Push-relabel algorithm to find max s-t flow. Based on lock-free implementation
// specified by Bo Hong. Uses one CUDA thread per vertex.
Flow *pushRelabelLockFreeGPU(Graph *g, int s, int t) {
int *residualFlow;
int *height;
int *excessFlow;
int *netFlowOutS;
int *netFlowInT;
int *tempHeights = (int *)calloc(g->n, sizeof(int));
int *tempExcessFlows = (int *)calloc(g->n, sizeof(int));
int *finalFlow = (int *)malloc((g->n * g->n) * sizeof(int));
memcpy(finalFlow, g->capacities, (g->n * g->n) * sizeof(int));
cudaCheckError(cudaMalloc((void **)&residualFlow, sizeof(int) * (g->n * g->n)));
cudaCheckError(cudaMalloc((void **)&height, sizeof(int) * g->n));
cudaCheckError(cudaMalloc((void **)&excessFlow, sizeof(int) * g->n));
cudaCheckError(cudaMalloc((void **)&netFlowOutS, sizeof(int)));
cudaCheckError(cudaMalloc((void **)&netFlowInT, sizeof(int)));
// initialize preflow
int flowOutS = 0;
int flowInT = 0;
tempHeights[s] = g->n;
#pragma omp parallel for reduction(+:flowOutS)
for (int v = 0; v < g->n; v++) {
int cap = g->capacities[IDX(s, v, g->n)];
if (cap > 0 && (s != v)) {
finalFlow[IDX(s, v, g->n)] = 0;
finalFlow[IDX(v, s, g->n)] += cap;
flowOutS += cap;
tempExcessFlows[v] = cap;
if (v == t) {
flowInT += cap;
}
}
}
cudaCheckError(cudaMemcpy(residualFlow, finalFlow, sizeof(int) * (g->n * g->n),
cudaMemcpyHostToDevice));
cudaCheckError(cudaMemcpy(height, tempHeights, sizeof(int) * g->n,
cudaMemcpyHostToDevice));
cudaCheckError(cudaMemcpy(excessFlow, tempExcessFlows, sizeof(int) * g->n,
cudaMemcpyHostToDevice));
cudaCheckError(cudaMemcpy(netFlowInT, &flowInT, sizeof(int),
cudaMemcpyHostToDevice));
cudaCheckError(cudaMemcpy(netFlowOutS, &flowOutS, sizeof(int),
cudaMemcpyHostToDevice));
int numBlocks = UPDIV((g->n - 2), threadsPerBlock.x);
pushRelabelLockFreeKernel<<<numBlocks, threadsPerBlock>>>(residualFlow,
height, excessFlow, netFlowOutS, netFlowInT, s, t, g->n);
free(tempHeights);
free(tempExcessFlows);
cudaCheckError(cudaThreadSynchronize());
cudaCheckError(cudaMemcpy(finalFlow, residualFlow, sizeof(int) * (g->n * g->n),
cudaMemcpyDeviceToHost));
cudaCheckError(cudaMemcpy(&flowInT, netFlowInT, sizeof(int),
cudaMemcpyDeviceToHost));
// now update flow to represent actual flow
#pragma omp parallel for schedule(static)
for (int i = 0; i < (g->n * g-> n); i++) {
finalFlow[i] = g->capacities[i] - finalFlow[i];
}
Flow *result = (Flow *)malloc(sizeof(Flow));
result->maxFlow = flowInT;
result->finalEdgeFlows = finalFlow;
cudaCheckError(cudaFree(residualFlow));
cudaCheckError(cudaFree(height));
cudaCheckError(cudaFree(excessFlow));
cudaCheckError(cudaFree(netFlowOutS));
cudaCheckError(cudaFree(netFlowInT));
return result;
} |
1d047dcabab3fb56cb4df7b3af335173406f90be.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime_api.h>
#include <hiprand/hiprand.h>
#include "hiprand/hiprand_kernel.h"
#include <assert.h>
// L should be (multiple of (THR_NUMBER - 2) ) + 2
const int THR_NUMBER = 30;
#define SETBLOCKNUM 5
const int L = (THR_NUMBER -2)* SETBLOCKNUM +2;
// #define MULTISPIN unsigned char
#define MULTISPIN unsigned int
const int MULTISIZE = sizeof(MULTISPIN) *8;
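// multispin coding: every bit of a MULTISPIN word stores the spin of one independent replica,
// so MULTISIZE lattices are updated simultaneously with bitwise operations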
#define T_CYCLE_START 2.26
#define T_CYCLE_END 2.275
#define T_CYCLE_STEP 0.002
#define SINGLETEMP 2.26918531
int n_temps = ( T_CYCLE_END - T_CYCLE_START )/ (T_CYCLE_STEP);
#define J 1.
#define SEED 1000
const int AREA = L*L;
const int NTOT = (L-2)*(L-2);
// static const float EXP4_TRESHOLD = exp( -(4.*J) / T);
// static const float EXP8_TRESHOLD = exp( -(8.*J) / T);
#define STEPS_REPEAT 2
#define T_MAX_SIM 20
#define T_MEASURE_WAIT 0
#define T_MEASURE_INTERVAL 4
#define MEASURE_MAG 1
#define MEASURE_CORR_LEN 1
// print history true/false
#define HISTORY 1
const int BLOCK_NUMBER = ( L-2)/( THR_NUMBER - 2 );
const dim3 BLOCKS( BLOCK_NUMBER, BLOCK_NUMBER );
const dim3 THREADS( THR_NUMBER, THR_NUMBER );
// average tracker struct
struct avg_tr {
float sum;
float sum_squares;
int n;
};
struct avg_tr new_avg_tr(int locn) {
struct avg_tr a = { .sum = 0, .sum_squares = 0, .n = locn};
return a;
}
void update_avg(struct avg_tr * tr_p, float newval) {
tr_p->sum += newval;
tr_p->sum_squares += (newval*newval);
}
float average( struct avg_tr tr) {
return (tr.sum)/((float) tr.n) ;
}
float stdev( struct avg_tr tr) {
return sqrt( ( tr.sum_squares)/((float) tr.n) - pow(( (tr.sum)/((float) tr.n) ),2) );
}
// float variance( struct avg_tr tr) {
// return ( ( tr.sum_squares)/((float) tr.n) - pow(( (tr.sum)/((float) tr.n) ),2) );
// }
// RNG init kernel
__global__ void initRNG(hiprandState_t * const rngStates, const int seed) {
// Determine thread ID
int blockId = blockIdx.x+ blockIdx.y * gridDim.x;
int tid = blockId * (blockDim.x * blockDim.y)+ (threadIdx.y * blockDim.x)+ threadIdx.x;
// Initialise the RNG
hiprand_init(seed, tid, 0, &rngStates[tid]);
}
struct coords {
int x;
int y;
};
__device__ coords dev_get_thread_coords() {
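// blocks advance by THR_NUMBER-2 lattice sites, so adjacent blocks overlap by a one-site halo;
// only the inner threads of a block own a spin that they update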
struct coords thread_coords;
thread_coords.x = blockIdx.x*( THR_NUMBER - 2 ) + ( threadIdx.x ) ;
thread_coords.y = blockIdx.y*( THR_NUMBER - 2 ) + ( threadIdx.y ) ;
return thread_coords;
}
// float unitrand(){
// return (float)rand() / (float)RAND_MAX;
// }
__device__ float dev_unitrand( hiprandState_t * const rngStates, unsigned int tid ){
hiprandState_t localState = rngStates[tid];
float val = hiprand_uniform(&localState);
rngStates[tid] = localState;
return val;
}
// index has to be less that MULTISIZE
__device__ void dev_set_spin_1 (MULTISPIN * multi, int index) {
*multi |= 1 << index;
}
__device__ void dev_set_spin_0 (MULTISPIN * multi, int index) {
*multi &= ~(1 << index);
}
__device__ MULTISPIN dev_read_spin(MULTISPIN multi, int index) {
// return (( multi >> ((MULTISPIN) index ) ) & ((MULTISPIN) 1));
// if (multi & (1 << index) == 0) {
// return 0;
// } else {
// return 1;
// }
return ( (multi >> index) & 1 );
}
// each bit exp8 and exp8 describes the Metropolis RNG result for that bit,
// specifying if the random r is bigger or smaller than the relevant values e^(4J/kT) and e^(8J/kT) (passed from outside)
__device__ MULTISPIN generate_exp4_mask(float exp4, float exp8, hiprandState_t * const rngStates, int tid ) {
MULTISPIN res;
for(int k=0; k<MULTISIZE; k++) {
float random_number = dev_unitrand(rngStates, tid);
if( exp4 > random_number && random_number > exp8) { // this is taken from the article and works. the version below might not but slightly simplifies some things
// if( exp4 > random_number) {
dev_set_spin_1(&res, k);
} else {
dev_set_spin_0(&res, k);
}
}
return res;
}
__device__ MULTISPIN generate_exp8_mask(float exp8, hiprandState_t * const rngStates, int tid ) {
MULTISPIN res;
for(int k=0; k<MULTISIZE; k++) {
float random_number = dev_unitrand(rngStates, tid);
if( random_number < exp8 ) {
dev_set_spin_1(&res, k);
} else {
dev_set_spin_0(&res, k);
}
}
return res;
}
MULTISPIN init_random_multispin() {
return (MULTISPIN) rand(); // just spam random bits
}
void init_random_grid(MULTISPIN grid[L*L]) {
for(int x = 0; x<L; x++) {
for(int y = 0; y<L; y++) {
grid[x+y*L] = init_random_multispin();
}
}
}
MULTISPIN init_t0_multispin() {
return (MULTISPIN) 0; // should be all zeros for all sensible multispin types
}
void init_t0_grid(MULTISPIN grid[L*L]) {
for(int x = 0; x<L; x++) {
for(int y = 0; y<L; y++) {
grid[x+y*L] = init_t0_multispin();
}
}
}
// void flip(MULTISPIN grid[L*L], int x, int y) {
// grid[x+y*L] = ~grid[x+y*L];
// }
// can segfault
__device__ static inline MULTISPIN dev_shared_grid_step(MULTISPIN shared_grid[THR_NUMBER*THR_NUMBER], int x, int y, int xstep, int ystep) {
return shared_grid[(x+xstep) + (y+ystep)*THR_NUMBER];
}
// segfault if applied to an edge spin, must be called only on the inner L-1 grid
__device__ void dev_update_multispin_shared(MULTISPIN grid[THR_NUMBER*THR_NUMBER], int x, int y, float exp4, float exp8, hiprandState_t * const rngStates, int tid ) {
MULTISPIN s0 = grid[x+y*THR_NUMBER];
MULTISPIN exp4_mask = generate_exp4_mask(exp4, exp8, rngStates, tid ); // here
MULTISPIN exp8_mask = generate_exp8_mask(exp8, rngStates, tid );
// "energy variables" indicating whether s0 is equal or opposite to each of its 4 neighbours
MULTISPIN i1 = s0 ^ dev_shared_grid_step(grid, x, y, 1, 0);
MULTISPIN i2 = s0 ^ dev_shared_grid_step(grid, x, y, -1, 0);
MULTISPIN i3 = s0 ^ dev_shared_grid_step(grid, x, y, 0, 1);
MULTISPIN i4 = s0 ^ dev_shared_grid_step(grid, x, y, 0, -1);
// bit sums with carry over between the i variables
MULTISPIN j1 = i1 & i2;
MULTISPIN j2 = i1 ^ i2;
MULTISPIN j3 = i3 & i4;
MULTISPIN j4 = i3 ^ i4;
// logic for deciding whether to flip s0 or not
MULTISPIN flip_mask = ( ((j1 | j3) | (~(j1^j3) & (j2&j4)) ) | ((j2 | j4) & exp4_mask ) | exp8_mask );
grid[x+y*THR_NUMBER] = grid[x+y*THR_NUMBER] ^ flip_mask;
// explanation:
// spins | i1234 | deltaE | j1 j2 j3 j4 |
// 1 | 1 | | |
// 101 | 1 1 | -8 | 1 0 1 0 |
// 1 | 1 | | |
// 0 | 0 | | |
// 101 | 1 1 | -4 | 0 1 1 0 | (j1 | j3)
// 1 | 1 | | |
// 0 | 0 | | 0 0 1 0 |
// 001 | 0 1 | 0 | or |-------------------------
// 1 | 1 | | 0 1 0 1 | ~(j1^j3) & (j2&j4))
// ------------------------------------------------------------------
// 0 | 0 | | |
// 000 | 0 0 | +4 | | (j2 | j4) & exp4
// 1 | 1 | | |
// ------------------------------------------------------------------
// 0 | 0 | | |
// 000 | 0 0 | +8 | 0 0 0 0 | exp8
// 0 | 0 | | |
// the first 2 cases are detected by (j1 | j3) and lead to the spin flip regardless of the RNG roll.
// the deltaH = 0 case can result in two different forms for the j's depending on how the spins are paired.
// the first of these is correctly picked up by (j1 | j3), while the second needs its own expression ~(j1^j3) & (j2&j4))
// in the 4th case, detected by (j2 | j4), the spin is flipped only if the RNG roll is lucky enough (exp4 = 1)
// if we still haven't flipped, we get to the last case. here the spin is flipped only if the RNG roll gives the luckiest result (exp8 = 1).
}
// non GPU function
void multidump_first(MULTISPIN grid[L*L]) {
// printf("first bit grid (out of %i):\n", MULTISIZE);
for(int x = 0; x<L; x++) {
for(int y = 0; y<L; y++) {
if(( grid[x+y*L] & 1 ) == 0) printf(" ");
else printf("");
}
printf("\n");
}
printf("\n");
}
// non GPU function
void multidump_a_few(MULTISPIN grid[L*L]) {
for(int k=0; k<5; k++) {
printf("grid on bit %i (out of %i):\n", k, MULTISIZE);
for(int x = 0; x<L; x++) {
for(int y = 0; y<L; y++) {
if(( grid[x+y*L] & (1 << k) ) == 0) printf(" ");
else printf("");
}
printf("\n");
}
printf("\n");
}
}
__global__ void dev_measure_cycle_kernel(MULTISPIN * dev_grid, hiprandState_t * const rngStates, int * dev_single_run_corr_lens, int* dev_bin_counters, float * dev_single_run_avgs, int * dev_partial_res, float exp4, float exp8, int ksim ) {
// setup
struct coords glob_coords = dev_get_thread_coords();
int glob_x = glob_coords.x;
int glob_y = glob_coords.y;
// Determine thread ID (for RNG)
int blockId = blockIdx.x+ blockIdx.y * gridDim.x;
int tid = blockId * (blockDim.x * blockDim.y)+ (threadIdx.y * blockDim.x)+ threadIdx.x;
__shared__ MULTISPIN shared_grid[ THR_NUMBER*THR_NUMBER ];
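// stage this block's (THR_NUMBER x THR_NUMBER) tile of spins, halo included, into shared memory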
shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] = dev_grid[(glob_x )+ (glob_y )*L ];
__syncthreads();
// allocate shared memory for measure results
// magnetization
__shared__ int blocksum[ MULTISIZE ];
if ( threadIdx.x == 0 && threadIdx.y == 0 ) {
for (int multik=0; multik<MULTISIZE; multik++) {
blocksum[ multik ] = 0;
}
}
__syncthreads();
////////////////////////////////////////////
////// measure
////////////////////////////////////////////
if(ksim > T_MEASURE_WAIT && ksim % T_MEASURE_INTERVAL == 0) {
// this condition does not depend on the thread id in any way
for (int multik=0; multik<MULTISIZE; multik++) {
// magnetization
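// two-stage reduction: interior threads add their bit into the per-block shared counter,
// then thread (0,0) converts the count to a magnetization in [-1,1] and accumulates it globally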
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
int lspin = (int) dev_read_spin(shared_grid[threadIdx.x + threadIdx.y*THR_NUMBER], multik );
atomicAdd( &(blocksum[ multik ]), lspin ); // change with pointer arithm?
}
__syncthreads();
if ( threadIdx.x == 0 && threadIdx.y == 0 ) {
int blockntot = (THR_NUMBER-2)*(THR_NUMBER-2);
float nval = ((float) ( blocksum[ multik] *2 - blockntot ))/ ( (float) blockntot );
atomicAdd(&(dev_single_run_avgs[multik]), nval);
blocksum[ multik ] = 0;
}
// correlation length
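// spins are stored as bits {0,1}; map them to ±1 before accumulating the pair product s0*s1
// into distance bins shared by all threads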
int s0 = ( (int) dev_read_spin( dev_grid[ glob_x + glob_y*L ], multik ) )*2 -1;
for (int loop_x=1; loop_x<=(L-2); loop_x++) {
for (int loop_y=1; loop_y<=(L-2); loop_y++) {
float dist = sqrt( pow( glob_x - (float) loop_x, 2) + pow(glob_y - (float) loop_y, 2) );
if (dist > 1.0 && dist < (L-2)) {
int bin = (int) (floor(dist)) -1;
int s1 = ( (int) dev_read_spin( dev_grid[ loop_x + loop_y*L ], multik ) )*2 -1;
atomicAdd(&(dev_single_run_corr_lens[ multik*(L-2) + bin]), (s0 * s1)); // pointer arithm?
atomicAdd(&(dev_bin_counters[ multik*(L-2) + bin]), 1); // pointer arithm?
}
}
}
}
}
__syncthreads();
////////////////////////////////////////////
////// update
////////////////////////////////////////////
// macro-checkerboards
// macro-white
if( (blockIdx.x + blockIdx.y%2)%2 == 0 ) {
/////////////
// checkerboards
// update only in the inner (THR_NUMBER-2)x(THR_NUMBER-2) threads of the block, because the edge (halo) threads aren't mapped to any grid spins they own
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// white
if( (glob_x + glob_y%2)%2 == 0 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// black
if( (glob_x + glob_y%2)%2 == 1 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
// if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 &&
// threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) {
// dev_grid[(glob_x )+ (glob_y )*L ] = shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] ;
// }
//////////
}
__syncthreads();
// macro-black
if( (blockIdx.x + blockIdx.y%2)%2 == 1 ) {
//////////
// checkerboards
// update only in the inner (THR_NUMBER-2)x(THR_NUMBER-2) threads of the block, because the edge (halo) threads aren't mapped to any grid spins they own
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// white
if( (glob_x + glob_y%2)%2 == 0 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// black
if( (glob_x + glob_y%2)%2 == 1 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
}
if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) {
dev_grid[(glob_x )+ (glob_y )*L ] = shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] ;
}
//////////
// __syncthreads();
}
void parall_measure_cycle(MULTISPIN startgrid[L*L], MULTISPIN * dev_grid, float exp4, float exp8, hiprandState_t * const rngStates, FILE *resf, FILE *corrf) {
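// host driver: allocates device-side accumulators, runs STEPS_REPEAT independent simulations
// from the same start grid, and averages magnetization and pair correlations over runs and bit-planes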
float n_measures_per_sim = (float) ((T_MAX_SIM - T_MEASURE_WAIT)/T_MEASURE_INTERVAL);
// space for tracking the correlation length
int single_run_corr_lens[MULTISIZE*(L-2)];
for (int k=0; k<MULTISIZE*(L-2); k++) {single_run_corr_lens[k] = 0;}
int * dev_single_run_corr_lens;
hipMalloc(&dev_single_run_corr_lens, MULTISIZE*(L-2)*sizeof(int));
hipMemcpy(dev_single_run_corr_lens, &single_run_corr_lens, MULTISIZE*(L-2)*sizeof(int), hipMemcpyHostToDevice);
// extra space
int bin_counters[MULTISIZE*(L-2)];
for (int k=0; k<MULTISIZE*(L-2); k++) {bin_counters[k] = 0;}
int * dev_bin_counters;
hipMalloc(&dev_bin_counters, MULTISIZE*(L-2)*sizeof(int));
hipMemcpy(dev_bin_counters, &bin_counters, MULTISIZE*(L-2)*sizeof(int), hipMemcpyHostToDevice);
// space for tracking magnetization
float single_run_avgs[MULTISIZE];
for (int k=0; k<MULTISIZE; k++) {single_run_avgs[k] = 0.;}
float * dev_single_run_avgs;
hipMalloc(&dev_single_run_avgs, MULTISIZE*sizeof(float));
hipMemcpy(dev_single_run_avgs, &single_run_avgs, MULTISIZE*sizeof(float), hipMemcpyHostToDevice);
// extra space needed by update_magnetization
int partial_res[MULTISIZE];
for (int k=0; k<MULTISIZE; k++) {partial_res[k] = 0;}
int * dev_partial_res;
hipMalloc(&dev_partial_res, MULTISIZE*sizeof(int));
hipMemcpy(dev_partial_res, &partial_res, MULTISIZE*sizeof(int), hipMemcpyHostToDevice);
// outer average
struct avg_tr avg_of_runs = new_avg_tr( MULTISIZE * STEPS_REPEAT );
// corr len outer averages
struct avg_tr avg_of_corrs[L-2];
for (int lk=0; lk<(L-2); lk++) {
avg_of_corrs[lk] = new_avg_tr( MULTISIZE * STEPS_REPEAT );
}
for( int krep=0; krep< STEPS_REPEAT; krep++) {
if (HISTORY) printf("# simulation %i\n", krep+1);
if (HISTORY) printf("# waiting thermalization for the first %i sim steps.\n", T_MEASURE_WAIT);
hipMemcpy(dev_grid, startgrid, L*L*sizeof(MULTISPIN), hipMemcpyHostToDevice);
// kernel
for (int ksim=0; ksim<T_MAX_SIM; ksim++) {
hipLaunchKernelGGL(( dev_measure_cycle_kernel), dim3(BLOCKS), dim3(THREADS), 0, 0, dev_grid, rngStates, dev_single_run_corr_lens, dev_bin_counters, dev_single_run_avgs, dev_partial_res, exp4, exp8, ksim );
}
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("kernel: ERROR: %s\n", hipGetErrorString(err));
} else printf("kernel: no ERROR: %s\n", hipGetErrorString(err));
// results
// magnetization
hipMemcpy(&single_run_avgs, dev_single_run_avgs, MULTISIZE*sizeof(float), hipMemcpyDeviceToHost);
for(int multik=0; multik <MULTISIZE; multik++) {
float lres = single_run_avgs[multik] / (n_measures_per_sim * BLOCK_NUMBER*BLOCK_NUMBER); // change
if (HISTORY) printf("# average on bit %i\n: %f\n", multik+1, lres);
update_avg(&avg_of_runs, lres);
// reset averages
single_run_avgs[multik] = 0.;
partial_res[multik] = 0;
}
//reset on device
hipMemcpy(dev_single_run_avgs, &single_run_avgs, MULTISIZE*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_partial_res, & partial_res, MULTISIZE*sizeof(int), hipMemcpyHostToDevice);
// corr len
hipMemcpy(&single_run_corr_lens, dev_single_run_corr_lens, MULTISIZE*(L-2)*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(&bin_counters, dev_bin_counters, MULTISIZE*(L-2)*sizeof(int), hipMemcpyDeviceToHost);
for (int lk=0; lk<(L-2); lk++) {
for(int multik=0; multik <MULTISIZE; multik++) {
float lres = ((float) (single_run_corr_lens[multik*(L-2) + lk])) / ( (float)(bin_counters[multik*(L-2) + lk]) ); // change
if (HISTORY) printf("# correlation on bit %i for r = %i\n: %f\n", multik+1, lk+1, lres);
update_avg(&avg_of_corrs[lk], lres);
// reset averages
single_run_corr_lens[multik*(L-2) + lk] = 0.;
bin_counters[multik*(L-2) + lk] = 0.;
}
}
// reset on device
hipMemcpy(dev_single_run_corr_lens, &single_run_corr_lens, MULTISIZE*(L-2)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_bin_counters, &bin_counters, MULTISIZE*(L-2)*sizeof(int), hipMemcpyHostToDevice);
if (HISTORY) printf("# end simulation %i\n", krep+1);
}
// END OUTER REPETITION LOOP
// magn
float l2av = average(avg_of_runs);
float l2stdev = stdev(avg_of_runs);
if (HISTORY) printf("# overall average \n: %f +- %f\n", l2av, l2stdev);
fprintf(resf, "%f ", l2av);
fprintf(resf, "%f\n", l2stdev);
// corr len
for (int lk=0; lk<(L-2); lk++) {
float l2corr = average(avg_of_corrs[lk]);
float l2stdcorr = stdev(avg_of_corrs[lk]);
fprintf(corrf, "%i ", lk+1);
fprintf(corrf, "%f ", l2corr);
fprintf(corrf, "%f\n", l2stdcorr);
}
fprintf(corrf, "\n");
// grid for displaying end-state (of last rep only)
MULTISPIN endgrid[L*L];
hipMemcpy(endgrid, dev_grid, L*L*sizeof(MULTISPIN), hipMemcpyDeviceToHost);
if (HISTORY) multidump_first(endgrid);
hipFree(dev_partial_res);
hipFree(dev_single_run_avgs);
}
int main() {
// L should be (multiple of THR_NUMBER -2) + 2
assert( ((L-2)% (THR_NUMBER-2) )== 0 );
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
FILE *resf = fopen("results.txt", "w");
fprintf(resf, "# gpu1\n");
fprintf(resf, "# parameters:\n# linear_size: %i\n", L);
fprintf(resf, "# coupling: %f\n# repetitions: %i\n", J, STEPS_REPEAT);
fprintf(resf, "# simulation_t_max: %i\n# thermalization_time: %i\n# time_between_measurements: %i\n# base_random_seed: %i\n", T_MAX_SIM,T_MEASURE_WAIT, T_MEASURE_INTERVAL, SEED);
fprintf(resf, "# extra:\n# area: %i\n# active_spins_excluding_boundaries:%i\n", AREA, NTOT);
fprintf(resf, "\n");
fprintf(resf, "# columns: temperature - average magnetization - uncertainty \n");
FILE *corrf = fopen("corr.txt", "w");
fprintf(corrf, "# gpu1\n");
fprintf(corrf, "# parameters:\n# linear_size: %i\n", L);
fprintf(corrf, "# coupling: %f\n# repetitions: %i\n", J, STEPS_REPEAT);
fprintf(corrf, "# simulation_t_max: %i\n# thermalization_time: %i\n# time_between_measurements: %i\n# base_random_seed: %i\n", T_MAX_SIM,T_MEASURE_WAIT, T_MEASURE_INTERVAL, SEED);
fprintf(corrf, "# extra:\n# area: %i\n# active_spins_excluding_boundaries:%i\n", AREA, NTOT);
fprintf(corrf, "\n");
fprintf(corrf, "# columns: temperature - average magnetization - uncertainty \n");
// still used for init_random_grid
srand(SEED);
// hiprand init
// Allocate memory for RNG states
hiprandState_t *d_rngStates = 0;
hipMalloc((void **)&d_rngStates, THR_NUMBER*THR_NUMBER*BLOCK_NUMBER*BLOCK_NUMBER*sizeof(hiprandState_t));
// Initialise RNG
hipLaunchKernelGGL(( initRNG), dim3(BLOCKS), dim3(THREADS), 0, 0, d_rngStates, SEED);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("initRNG: ERROR: %s\n", hipGetErrorString(err));
} else printf("initRNG: no ERROR: %s\n", hipGetErrorString(err));
// device grid
MULTISPIN * dev_grid;
hipMalloc(&dev_grid, L*L*sizeof(MULTISPIN));
// original grid on the cpu
MULTISPIN startgrid[L*L];
init_t0_grid(startgrid);
// multidump_a_few(startgrid);
// // temp cycle:
// for( float kt=T_CYCLE_START; kt<T_CYCLE_END; kt+=T_CYCLE_STEP ) {
// const float EXP4 = exp( -(4.*J) / kt);
// const float EXP8 = exp( -(8.*J) / kt);
// fprintf(resf, "%f ", kt);
// fprintf(corrf, "\n#T = \n%f\n", kt);
// if (HISTORY) printf("temperature: %f\n", kt);
// parall_measure_cycle(startgrid, dev_grid, EXP4, EXP8, d_rngStates, resf, corrf);
// }
// // // // only 1:
// // // // just one:
const float EXP4 = exp( -(4.*J) / SINGLETEMP);
const float EXP8 = exp( -(8.*J) / SINGLETEMP);
fprintf(resf, "%f ", SINGLETEMP);
if (HISTORY) printf("temperature: %f\n", SINGLETEMP);
parall_measure_cycle(startgrid, dev_grid, EXP4, EXP8, d_rngStates, resf, corrf);
printf(" ERROR? rng malloc size: %i\n", THR_NUMBER*THR_NUMBER*BLOCK_NUMBER*BLOCK_NUMBER*sizeof(hiprandState_t));
printf(" ERROR? shared memory used: %i\n", THR_NUMBER*THR_NUMBER*sizeof(MULTISPIN) + BLOCK_NUMBER*BLOCK_NUMBER*MULTISIZE*sizeof(int));
hipFree(d_rngStates);
hipFree(dev_grid);
hipEventRecord(stop);
hipEventSynchronize(stop);
float total_time = 0;
hipEventElapsedTime(&total_time, start, stop);
FILE *timef = fopen("time.txt", "w");
long int total_flips = ((long int)(n_temps))* ((long int)((STEPS_REPEAT))) * ((long int)(T_MAX_SIM)) * ((long int)(MULTISIZE)) * ((long int)(NTOT));
fprintf(timef, "# gpu1\n");
fprintf(timef, "# total execution time (milliseconds):\n");
fprintf(timef, "%f\n", total_time);
fprintf(timef, "# total spin flips performed:\n");
fprintf(timef, "%li\n", total_flips);
fprintf(timef, "# average spin flips per millisecond:\n");
fprintf(timef, "%Lf\n", ((long double) total_flips )/( (long double) total_time ) );
fclose(timef);
fclose(resf);
return 0;
}
| 1d047dcabab3fb56cb4df7b3af335173406f90be.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime_api.h>
#include <curand.h>
#include "curand_kernel.h"
#include <assert.h>
// L should be (multiple of (THR_NUMBER - 2) ) + 2
const int THR_NUMBER = 30;
#define SETBLOCKNUM 5
const int L = (THR_NUMBER -2)* SETBLOCKNUM +2;
// #define MULTISPIN unsigned char
#define MULTISPIN unsigned int
const int MULTISIZE = sizeof(MULTISPIN) *8;
#define T_CYCLE_START 2.26
#define T_CYCLE_END 2.275
#define T_CYCLE_STEP 0.002
#define SINGLETEMP 2.26918531
int n_temps = ( T_CYCLE_END - T_CYCLE_START )/ (T_CYCLE_STEP);
#define J 1.
#define SEED 1000
const int AREA = L*L;
const int NTOT = (L-2)*(L-2);
// static const float EXP4_TRESHOLD = exp( -(4.*J) / T);
// static const float EXP8_TRESHOLD = exp( -(8.*J) / T);
#define STEPS_REPEAT 2
#define T_MAX_SIM 20
#define T_MEASURE_WAIT 0
#define T_MEASURE_INTERVAL 4
#define MEASURE_MAG 1
#define MEASURE_CORR_LEN 1
// print history true/false
#define HISTORY 1
const int BLOCK_NUMBER = ( L-2)/( THR_NUMBER - 2 );
const dim3 BLOCKS( BLOCK_NUMBER, BLOCK_NUMBER );
const dim3 THREADS( THR_NUMBER, THR_NUMBER );
// average tracker struct
struct avg_tr {
float sum;
float sum_squares;
int n;
};
struct avg_tr new_avg_tr(int locn) {
struct avg_tr a = { .sum = 0, .sum_squares = 0, .n = locn};
return a;
}
void update_avg(struct avg_tr * tr_p, float newval) {
tr_p->sum += newval;
tr_p->sum_squares += (newval*newval);
}
float average( struct avg_tr tr) {
return (tr.sum)/((float) tr.n) ;
}
float stdev( struct avg_tr tr) {
return sqrt( ( tr.sum_squares)/((float) tr.n) - pow(( (tr.sum)/((float) tr.n) ),2) );
}
// float variance( struct avg_tr tr) {
// return ( ( tr.sum_squares)/((float) tr.n) - pow(( (tr.sum)/((float) tr.n) ),2) );
// }
// RNG init kernel
__global__ void initRNG(curandState * const rngStates, const int seed) {
// Determine thread ID
int blockId = blockIdx.x+ blockIdx.y * gridDim.x;
int tid = blockId * (blockDim.x * blockDim.y)+ (threadIdx.y * blockDim.x)+ threadIdx.x;
// Initialise the RNG
curand_init(seed, tid, 0, &rngStates[tid]);
}
struct coords {
int x;
int y;
};
__device__ coords dev_get_thread_coords() {
struct coords thread_coords;
thread_coords.x = blockIdx.x*( THR_NUMBER - 2 ) + ( threadIdx.x ) ;
thread_coords.y = blockIdx.y*( THR_NUMBER - 2 ) + ( threadIdx.y ) ;
return thread_coords;
}
// float unitrand(){
// return (float)rand() / (float)RAND_MAX;
// }
__device__ float dev_unitrand( curandState * const rngStates, unsigned int tid ){
curandState localState = rngStates[tid];
float val = curand_uniform(&localState);
rngStates[tid] = localState;
return val;
}
// index has to be less that MULTISIZE
__device__ void dev_set_spin_1 (MULTISPIN * multi, int index) {
*multi |= 1 << index;
}
__device__ void dev_set_spin_0 (MULTISPIN * multi, int index) {
*multi &= ~(1 << index);
}
__device__ MULTISPIN dev_read_spin(MULTISPIN multi, int index) {
// return (( multi >> ((MULTISPIN) index ) ) & ((MULTISPIN) 1));
// if (multi & (1 << index) == 0) {
// return 0;
// } else {
// return 1;
// }
return ( (multi >> index) & 1 );
}
// each bit exp8 and exp8 describes the Metropolis RNG result for that bit,
// specifying if the random r is bigger or smaller than the relevant values e^(4J/kT) and e^(8J/kT) (passed from outside)
__device__ MULTISPIN generate_exp4_mask(float exp4, float exp8, curandState * const rngStates, int tid ) {
MULTISPIN res;
for(int k=0; k<MULTISIZE; k++) {
float random_number = dev_unitrand(rngStates, tid);
if( exp4 > random_number && random_number > exp8) { // this is taken from the article and works. the version below might not but slightly simplifies some things
// if( exp4 > random_number) {
dev_set_spin_1(&res, k);
} else {
dev_set_spin_0(&res, k);
}
}
return res;
}
__device__ MULTISPIN generate_exp8_mask(float exp8, curandState * const rngStates, int tid ) {
MULTISPIN res;
for(int k=0; k<MULTISIZE; k++) {
float random_number = dev_unitrand(rngStates, tid);
if( random_number < exp8 ) {
dev_set_spin_1(&res, k);
} else {
dev_set_spin_0(&res, k);
}
}
return res;
}
MULTISPIN init_random_multispin() {
return (MULTISPIN) rand(); // just spam random bits
}
void init_random_grid(MULTISPIN grid[L*L]) {
for(int x = 0; x<L; x++) {
for(int y = 0; y<L; y++) {
grid[x+y*L] = init_random_multispin();
}
}
}
MULTISPIN init_t0_multispin() {
return (MULTISPIN) 0; // should be all zeros for all sensible multispin types
}
void init_t0_grid(MULTISPIN grid[L*L]) {
for(int x = 0; x<L; x++) {
for(int y = 0; y<L; y++) {
grid[x+y*L] = init_t0_multispin();
}
}
}
// void flip(MULTISPIN grid[L*L], int x, int y) {
// grid[x+y*L] = ~grid[x+y*L];
// }
// can segfault
__device__ static inline MULTISPIN dev_shared_grid_step(MULTISPIN shared_grid[THR_NUMBER*THR_NUMBER], int x, int y, int xstep, int ystep) {
return shared_grid[(x+xstep) + (y+ystep)*THR_NUMBER];
}
// segfault if applied to an edge spin, must be called only on the inner L-1 grid
__device__ void dev_update_multispin_shared(MULTISPIN grid[THR_NUMBER*THR_NUMBER], int x, int y, float exp4, float exp8, curandState * const rngStates, int tid ) {
MULTISPIN s0 = grid[x+y*THR_NUMBER];
MULTISPIN exp4_mask = generate_exp4_mask(exp4, exp8, rngStates, tid ); // here
MULTISPIN exp8_mask = generate_exp8_mask(exp8, rngStates, tid );
// "energy variables" indicating whether s0 is equal or opposite to each of its 4 neighbours
MULTISPIN i1 = s0 ^ dev_shared_grid_step(grid, x, y, 1, 0);
MULTISPIN i2 = s0 ^ dev_shared_grid_step(grid, x, y, -1, 0);
MULTISPIN i3 = s0 ^ dev_shared_grid_step(grid, x, y, 0, 1);
MULTISPIN i4 = s0 ^ dev_shared_grid_step(grid, x, y, 0, -1);
// bit sums with carry over between the i variables
MULTISPIN j1 = i1 & i2;
MULTISPIN j2 = i1 ^ i2;
MULTISPIN j3 = i3 & i4;
MULTISPIN j4 = i3 ^ i4;
// logic for deciding whether to flip s0 or not
MULTISPIN flip_mask = ( ((j1 | j3) | (~(j1^j3) & (j2&j4)) ) | ((j2 | j4) & exp4_mask ) | exp8_mask );
grid[x+y*THR_NUMBER] = grid[x+y*THR_NUMBER] ^ flip_mask;
// explanation:
// spins | i1234 | deltaE | j1 j2 j3 j4 |
// 1 | 1 | | |
// 101 | 1 1 | -8 | 1 0 1 0 |
// 1 | 1 | | |
// 0 | 0 | | |
// 101 | 1 1 | -4 | 0 1 1 0 | (j1 | j3)
// 1 | 1 | | |
// 0 | 0 | | 0 0 1 0 |
// 001 | 0 1 | 0 | or |-------------------------
// 1 | 1 | | 0 1 0 1 | ~(j1^j3) & (j2&j4))
// ------------------------------------------------------------------
// 0 | 0 | | |
// 000 | 0 0 | +4 | | (j2 | j4) & exp4
// 1 | 1 | | |
// ------------------------------------------------------------------
// 0 | 0 | | |
// 000 | 0 0 | +8 | 0 0 0 0 | exp8
// 0 | 0 | | |
// the first 2 cases are detected by (j1 | j3) and lead to the spin flip regardless of the RNG roll.
// the deltaH = 0 case can result in two different forms for the j's depending on how the spins are paired.
// the first of these is correctly picked up by (j1 | j3), while the second needs its own expression ~(j1^j3) & (j2&j4))
// in the 4th case, detected by (j2 | j4), the spin is flipped only if the RNG roll is lucky enough (exp4 = 1)
// if we still haven't flipped, we get to the last case. here the spin is flipped only if the RNG roll gives the luckiest result (exp8 = 1).
}
// non GPU function
void multidump_first(MULTISPIN grid[L*L]) {
// printf("first bit grid (out of %i):\n", MULTISIZE);
for(int x = 0; x<L; x++) {
for(int y = 0; y<L; y++) {
if(( grid[x+y*L] & 1 ) == 0) printf(" ");
else printf("█");
}
printf("\n");
}
printf("\n");
}
// non GPU function
void multidump_a_few(MULTISPIN grid[L*L]) {
for(int k=0; k<5; k++) {
printf("grid on bit %i (out of %i):\n", k, MULTISIZE);
for(int x = 0; x<L; x++) {
for(int y = 0; y<L; y++) {
if(( grid[x+y*L] & (1 << k) ) == 0) printf(" ");
else printf("█");
}
printf("\n");
}
printf("\n");
}
}
__global__ void dev_measure_cycle_kernel(MULTISPIN * dev_grid, curandState * const rngStates, int * dev_single_run_corr_lens, int* dev_bin_counters, float * dev_single_run_avgs, int * dev_partial_res, float exp4, float exp8, int ksim ) {
// setup
struct coords glob_coords = dev_get_thread_coords();
int glob_x = glob_coords.x;
int glob_y = glob_coords.y;
// Determine thread ID (for RNG)
int blockId = blockIdx.x+ blockIdx.y * gridDim.x;
int tid = blockId * (blockDim.x * blockDim.y)+ (threadIdx.y * blockDim.x)+ threadIdx.x;
__shared__ MULTISPIN shared_grid[ THR_NUMBER*THR_NUMBER ];
shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] = dev_grid[(glob_x )+ (glob_y )*L ];
__syncthreads();
// allocate shared memory for measure results
// magnetization
__shared__ int blocksum[ MULTISIZE ];
if ( threadIdx.x == 0 && threadIdx.y == 0 ) {
for (int multik=0; multik<MULTISIZE; multik++) {
blocksum[ multik ] = 0;
}
}
__syncthreads();
////////////////////////////////////////////
////// measure
////////////////////////////////////////////
if(ksim > T_MEASURE_WAIT && ksim % T_MEASURE_INTERVAL == 0) {
// this condition does not depend on the thread id in any way
for (int multik=0; multik<MULTISIZE; multik++) {
// magnetization
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
int lspin = (int) dev_read_spin(shared_grid[threadIdx.x + threadIdx.y*THR_NUMBER], multik );
atomicAdd( &(blocksum[ multik ]), lspin ); // change with pointer arithm?
}
__syncthreads();
if ( threadIdx.x == 0 && threadIdx.y == 0 ) {
int blockntot = (THR_NUMBER-2)*(THR_NUMBER-2);
float nval = ((float) ( blocksum[ multik] *2 - blockntot ))/ ( (float) blockntot );
atomicAdd(&(dev_single_run_avgs[multik]), nval);
blocksum[ multik ] = 0;
}
// correlation length
int s0 = ( (int) dev_read_spin( dev_grid[ glob_x + glob_y*L ], multik ) )*2 -1;
for (int loop_x=1; loop_x<=(L-2); loop_x++) {
for (int loop_y=1; loop_y<=(L-2); loop_y++) {
float dist = sqrt( pow( glob_x - (float) loop_x, 2) + pow(glob_y - (float) loop_y, 2) );
if (dist > 1.0 && dist < (L-2)) {
int bin = (int) (floor(dist)) -1;
int s1 = ( (int) dev_read_spin( dev_grid[ loop_x + loop_y*L ], multik ) )*2 -1;
atomicAdd(&(dev_single_run_corr_lens[ multik*(L-2) + bin]), (s0 * s1)); // pointer arithm?
atomicAdd(&(dev_bin_counters[ multik*(L-2) + bin]), 1); // pointer arithm?
}
}
}
}
}
__syncthreads();
////////////////////////////////////////////
////// update
////////////////////////////////////////////
// macro-checkerboards
// macro-white
if( (blockIdx.x + blockIdx.y%2)%2 == 0 ) {
/////////////
// checkerboards
// update only in the inner (THR_NUMBER-2)x(THR_NUMBER-2) threads of the block, because the edge (halo) threads aren't mapped to any grid spins they own
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// white
if( (glob_x + glob_y%2)%2 == 0 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// black
if( (glob_x + glob_y%2)%2 == 1 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
// if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 &&
// threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) {
// dev_grid[(glob_x )+ (glob_y )*L ] = shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] ;
// }
//////////
}
__syncthreads();
// macro-black
if( (blockIdx.x + blockIdx.y%2)%2 == 1 ) {
//////////
// checkerboards
// update only in the inner (THR_NUMBER-2)x(THR_NUMBER-2) threads of the block, because the edge (halo) threads aren't mapped to any grid spins they own
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// white
if( (glob_x + glob_y%2)%2 == 0 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// black
if( (glob_x + glob_y%2)%2 == 1 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
}
if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) {
dev_grid[(glob_x )+ (glob_y )*L ] = shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] ;
}
//////////
// __syncthreads();
}
void parall_measure_cycle(MULTISPIN startgrid[L*L], MULTISPIN * dev_grid, float exp4, float exp8, curandState * const rngStates, FILE *resf, FILE *corrf) {
float n_measures_per_sim = (float) ((T_MAX_SIM - T_MEASURE_WAIT)/T_MEASURE_INTERVAL);
// space for tracking the correlation length
int single_run_corr_lens[MULTISIZE*(L-2)];
for (int k=0; k<MULTISIZE*(L-2); k++) {single_run_corr_lens[k] = 0;}
int * dev_single_run_corr_lens;
cudaMalloc(&dev_single_run_corr_lens, MULTISIZE*(L-2)*sizeof(int));
cudaMemcpy(dev_single_run_corr_lens, &single_run_corr_lens, MULTISIZE*(L-2)*sizeof(int), cudaMemcpyHostToDevice);
// extra space
int bin_counters[MULTISIZE*(L-2)];
for (int k=0; k<MULTISIZE*(L-2); k++) {bin_counters[k] = 0;}
int * dev_bin_counters;
cudaMalloc(&dev_bin_counters, MULTISIZE*(L-2)*sizeof(int));
cudaMemcpy(dev_bin_counters, &bin_counters, MULTISIZE*(L-2)*sizeof(int), cudaMemcpyHostToDevice);
// space for tracking magnetization
float single_run_avgs[MULTISIZE];
for (int k=0; k<MULTISIZE; k++) {single_run_avgs[k] = 0.;}
float * dev_single_run_avgs;
cudaMalloc(&dev_single_run_avgs, MULTISIZE*sizeof(float));
cudaMemcpy(dev_single_run_avgs, &single_run_avgs, MULTISIZE*sizeof(float), cudaMemcpyHostToDevice);
// extra space needed by update_magnetization
int partial_res[MULTISIZE];
for (int k=0; k<MULTISIZE; k++) {partial_res[k] = 0;}
int * dev_partial_res;
cudaMalloc(&dev_partial_res, MULTISIZE*sizeof(int));
cudaMemcpy(dev_partial_res, &partial_res, MULTISIZE*sizeof(int), cudaMemcpyHostToDevice);
// outer average
struct avg_tr avg_of_runs = new_avg_tr( MULTISIZE * STEPS_REPEAT );
// corr len outer averages
struct avg_tr avg_of_corrs[L-2];
for (int lk=0; lk<(L-2); lk++) {
avg_of_corrs[lk] = new_avg_tr( MULTISIZE * STEPS_REPEAT );
}
for( int krep=0; krep< STEPS_REPEAT; krep++) {
if (HISTORY) printf("# simulation %i\n", krep+1);
if (HISTORY) printf("# waiting thermalization for the first %i sim steps.\n", T_MEASURE_WAIT);
cudaMemcpy(dev_grid, startgrid, L*L*sizeof(MULTISPIN), cudaMemcpyHostToDevice);
// kernel
for (int ksim=0; ksim<T_MAX_SIM; ksim++) {
dev_measure_cycle_kernel<<<BLOCKS, THREADS>>>(dev_grid, rngStates, dev_single_run_corr_lens, dev_bin_counters, dev_single_run_avgs, dev_partial_res, exp4, exp8, ksim );
}
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("kernel: ERROR: %s\n", cudaGetErrorString(err));
} else printf("kernel: no ERROR: %s\n", cudaGetErrorString(err));
// results
// magnetization
cudaMemcpy(&single_run_avgs, dev_single_run_avgs, MULTISIZE*sizeof(float), cudaMemcpyDeviceToHost);
for(int multik=0; multik <MULTISIZE; multik++) {
float lres = single_run_avgs[multik] / (n_measures_per_sim * BLOCK_NUMBER*BLOCK_NUMBER); // change
if (HISTORY) printf("# average on bit %i\n: %f\n", multik+1, lres);
update_avg(&avg_of_runs, lres);
// reset averages
single_run_avgs[multik] = 0.;
partial_res[multik] = 0;
}
//reset on device
cudaMemcpy(dev_single_run_avgs, &single_run_avgs, MULTISIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_partial_res, & partial_res, MULTISIZE*sizeof(int), cudaMemcpyHostToDevice);
// corr len
cudaMemcpy(&single_run_corr_lens, dev_single_run_corr_lens, MULTISIZE*(L-2)*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&bin_counters, dev_bin_counters, MULTISIZE*(L-2)*sizeof(int), cudaMemcpyDeviceToHost);
for (int lk=0; lk<(L-2); lk++) {
for(int multik=0; multik <MULTISIZE; multik++) {
float lres = ((float) (single_run_corr_lens[multik*(L-2) + lk])) / ( (float)(bin_counters[multik*(L-2) + lk]) ); // change
if (HISTORY) printf("# correlation on bit %i for r = %i\n: %f\n", multik+1, lk+1, lres);
update_avg(&avg_of_corrs[lk], lres);
// reset averages
single_run_corr_lens[multik*(L-2) + lk] = 0;
bin_counters[multik*(L-2) + lk] = 0;
}
}
// reset on device
cudaMemcpy(dev_single_run_corr_lens, &single_run_corr_lens, MULTISIZE*(L-2)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_bin_counters, &bin_counters, MULTISIZE*(L-2)*sizeof(int), cudaMemcpyHostToDevice);
if (HISTORY) printf("# end simulation %i\n", krep+1);
}
// END OUTER REPETITION LOOP
// magn
float l2av = average(avg_of_runs);
float l2stdev = stdev(avg_of_runs);
if (HISTORY) printf("# overall average \n: %f +- %f\n", l2av, l2stdev);
fprintf(resf, "%f ", l2av);
fprintf(resf, "%f\n", l2stdev);
// corr len
for (int lk=0; lk<(L-2); lk++) {
float l2corr = average(avg_of_corrs[lk]);
float l2stdcorr = stdev(avg_of_corrs[lk]);
fprintf(corrf, "%i ", lk+1);
fprintf(corrf, "%f ", l2corr);
fprintf(corrf, "%f\n", l2stdcorr);
}
fprintf(corrf, "\n");
// grid for displaying end-state (of last rep only)
MULTISPIN endgrid[L*L];
cudaMemcpy(endgrid, dev_grid, L*L*sizeof(MULTISPIN), cudaMemcpyDeviceToHost);
if (HISTORY) multidump_first(endgrid);
cudaFree(dev_single_run_corr_lens);
cudaFree(dev_bin_counters);
cudaFree(dev_partial_res);
cudaFree(dev_single_run_avgs);
}
int main() {
// L should be (multiple of THR_NUMBER -2) + 2
assert( ((L-2)% (THR_NUMBER-2) )== 0 );
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
FILE *resf = fopen("results.txt", "w");
fprintf(resf, "# gpu1\n");
fprintf(resf, "# parameters:\n# linear_size: %i\n", L);
fprintf(resf, "# coupling: %f\n# repetitions: %i\n", J, STEPS_REPEAT);
fprintf(resf, "# simulation_t_max: %i\n# thermalization_time: %i\n# time_between_measurements: %i\n# base_random_seed: %i\n", T_MAX_SIM,T_MEASURE_WAIT, T_MEASURE_INTERVAL, SEED);
fprintf(resf, "# extra:\n# area: %i\n# active_spins_excluding_boundaries:%i\n", AREA, NTOT);
fprintf(resf, "\n");
fprintf(resf, "# columns: temperature - average magnetization - uncertainty \n");
FILE *corrf = fopen("corr.txt", "w");
fprintf(corrf, "# gpu1\n");
fprintf(corrf, "# parameters:\n# linear_size: %i\n", L);
fprintf(corrf, "# coupling: %f\n# repetitions: %i\n", J, STEPS_REPEAT);
fprintf(corrf, "# simulation_t_max: %i\n# thermalization_time: %i\n# time_between_measurements: %i\n# base_random_seed: %i\n", T_MAX_SIM,T_MEASURE_WAIT, T_MEASURE_INTERVAL, SEED);
fprintf(corrf, "# extra:\n# area: %i\n# active_spins_excluding_boundaries:%i\n", AREA, NTOT);
fprintf(corrf, "\n");
fprintf(corrf, "# columns: temperature - average magnetization - uncertainty \n");
// still used for init_random_grid
srand(SEED);
// curand init
// Allocate memory for RNG states
curandState *d_rngStates = 0;
cudaMalloc((void **)&d_rngStates, THR_NUMBER*THR_NUMBER*BLOCK_NUMBER*BLOCK_NUMBER*sizeof(curandState));
// Initialise RNG
initRNG<<<BLOCKS, THREADS>>>(d_rngStates, SEED);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("initRNG: ERROR: %s\n", cudaGetErrorString(err));
} else printf("initRNG: no ERROR: %s\n", cudaGetErrorString(err));
// device grid
MULTISPIN * dev_grid;
cudaMalloc(&dev_grid, L*L*sizeof(MULTISPIN));
// original grid on the cpu
MULTISPIN startgrid[L*L];
init_t0_grid(startgrid);
// multidump_a_few(startgrid);
// // temp cycle:
// for( float kt=T_CYCLE_START; kt<T_CYCLE_END; kt+=T_CYCLE_STEP ) {
// const float EXP4 = exp( -(4.*J) / kt);
// const float EXP8 = exp( -(8.*J) / kt);
// fprintf(resf, "%f ", kt);
// fprintf(corrf, "\n#T = \n%f\n", kt);
// if (HISTORY) printf("temperature: %f\n", kt);
// parall_measure_cycle(startgrid, dev_grid, EXP4, EXP8, d_rngStates, resf, corrf);
// }
// // // // only 1:
// // // // just one:
const float EXP4 = exp( -(4.*J) / SINGLETEMP);
const float EXP8 = exp( -(8.*J) / SINGLETEMP);
fprintf(resf, "%f ", SINGLETEMP);
if (HISTORY) printf("temperature: %f\n", SINGLETEMP);
parall_measure_cycle(startgrid, dev_grid, EXP4, EXP8, d_rngStates, resf, corrf);
printf(" ERROR? rng malloc size: %i\n", THR_NUMBER*THR_NUMBER*BLOCK_NUMBER*BLOCK_NUMBER*sizeof(curandState));
printf(" ERROR? shared memory used: %i\n", THR_NUMBER*THR_NUMBER*sizeof(MULTISPIN) + BLOCK_NUMBER*BLOCK_NUMBER*MULTISIZE*sizeof(int));
cudaFree(d_rngStates);
cudaFree(dev_grid);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float total_time = 0;
cudaEventElapsedTime(&total_time, start, stop);
FILE *timef = fopen("time.txt", "w");
long int total_flips = ((long int)(n_temps))* ((long int)((STEPS_REPEAT))) * ((long int)(T_MAX_SIM)) * ((long int)(MULTISIZE)) * ((long int)(NTOT));
fprintf(timef, "# gpu1\n");
fprintf(timef, "# total execution time (milliseconds):\n");
fprintf(timef, "%f\n", total_time);
fprintf(timef, "# total spin flips performed:\n");
fprintf(timef, "%li\n", total_flips);
fprintf(timef, "# average spin flips per millisecond:\n");
fprintf(timef, "%Lf\n", ((long double) total_flips )/( (long double) total_time ) );
fclose(timef);
fclose(resf);
fclose(corrf);
return 0;
}
|
288bc4e628941e0f8632e943e51d5eab15a06e89.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <fstream>
__global__ void simple_histo(unsigned int * d_bins, unsigned int * d_in, unsigned int BIN_SIZE, unsigned int IN_SIZE)
{
unsigned int myId = threadIdx.x + blockDim.x * blockIdx.x;
// checking for out-of-bounds
if (myId >= IN_SIZE)
{
return;
}
unsigned int myItem = d_in[myId];
unsigned int myBin = myItem % BIN_SIZE;
atomicAdd(&(d_bins[myBin]), 1);
}
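// bad_histo below is the non-atomic variant kept for comparison: the plain d_bins[myBin]++ is a racy
// read-modify-write, so concurrent threads overwrite each other's updates and bins come out too low.
// It is never launched from main().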
__global__ void bad_histo(unsigned int * d_bins, unsigned int * d_in, unsigned int BIN_SIZE, unsigned int IN_SIZE)
{
unsigned int myId = threadIdx.x + blockDim.x * blockIdx.x;
// checking for out-of-bounds
if (myId>=IN_SIZE)
{
return;
}
unsigned int myItem = d_in[myId];
unsigned int myBin = myItem % BIN_SIZE;
d_bins[myBin]++;
}
int main(int argc, char **argv)
{
std::ofstream myfile;
myfile.open ("par_histogram.csv");
// printf("---STARTED---\n");
unsigned int times = 10;
// Vars
unsigned int IN_SIZE;
unsigned int IN_BYTES;
unsigned int BIN_SIZE;
unsigned int BIN_BYTES;
unsigned int NUM_THREADS;
unsigned int NUM_BLOCKS;
unsigned int j;
unsigned int sum;
for(unsigned int rounds = 0; rounds<30; rounds++)
{
IN_SIZE = 1<<29;
IN_BYTES = sizeof(unsigned int) * IN_SIZE;
BIN_SIZE = 1<<rounds;
BIN_BYTES = sizeof(unsigned int) * BIN_SIZE;
NUM_THREADS = 1<<10;
NUM_BLOCKS = IN_SIZE/NUM_THREADS + ((IN_SIZE % NUM_THREADS)?1:0);
// Generate the input array on host
unsigned int * h_in = (unsigned int *)malloc(IN_BYTES);
unsigned int * h_bins = (unsigned int *)malloc(BIN_BYTES);
for (j = 0; j<IN_SIZE; j++) {h_in[j] = j;} //printf(" h_in[%d]: %d\n", j, h_in[j]);}
// Declare GPU memory pointers
/* printf("\n@@@ROUND@@@: %d\n", rounds);
printf("---IN_SIZE---: %d\n", IN_SIZE);
printf("---IN_BYTES---: %d\n", IN_BYTES);
printf("---BIN_SIZE---: %d\n", BIN_SIZE);
printf("---BIN_BYTES---: %d\n", BIN_BYTES);
printf("---THREAD_SIZE---: %d\n", NUM_THREADS);
printf("---NUM_BLOCKS---: %d\n", NUM_BLOCKS);
*/
unsigned * d_in;
unsigned * d_bins;
// Allocate GPU memory
hipMalloc(&d_in, IN_BYTES);
// printf("---ALLOCATED D_IN---\n");
hipMalloc(&d_bins, BIN_BYTES);
// printf("---ALLOCATED D_IN---\n");
// Transfer the arrays to the GPU
hipMemcpy(d_in, h_in, IN_BYTES, hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// running the code on the GPU $times times
for (unsigned int k = 0; k<times; k++)
{
hipMemset(d_bins, 0, BIN_BYTES);
hipLaunchKernelGGL(( simple_histo), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, d_bins, d_in, BIN_SIZE, IN_SIZE);
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// calculating time
float elapsedTime = .0f;
hipEventElapsedTime(&elapsedTime, start, stop);
elapsedTime = elapsedTime / ((float) times);
// printf(" time: %.5f\n", elapsedTime);
// Copy back to HOST
hipMemcpy(h_bins, d_bins, BIN_BYTES, hipMemcpyDeviceToHost);
sum = 0;
for(unsigned int i = 0; i<BIN_SIZE; i++){sum += h_bins[i];}
for(unsigned int i = 0; (i<BIN_SIZE) && (i<10); i++)
{
printf("bin %d: count %d\n", i, h_bins[i]);
}
printf("%d\n", sum);
// free GPU memory allocation
hipFree(d_in);
hipFree(d_bins);
hipEventDestroy(start);
hipEventDestroy(stop);
free(h_in);
free(h_bins);
myfile << elapsedTime << ",";
}
myfile.close();
return 0;
}
| 288bc4e628941e0f8632e943e51d5eab15a06e89.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <iostream>
#include <fstream>
__global__ void simple_histo(unsigned int * d_bins, unsigned int * d_in, unsigned int BIN_SIZE, unsigned int IN_SIZE)
{
unsigned int myId = threadIdx.x + blockDim.x * blockIdx.x;
// checking for out-of-bounds
if (myId >= IN_SIZE)
{
return;
}
unsigned int myItem = d_in[myId];
unsigned int myBin = myItem % BIN_SIZE;
atomicAdd(&(d_bins[myBin]), 1);
}
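// bad_histo below is the non-atomic variant kept for comparison: the plain d_bins[myBin]++ is a racy
// read-modify-write, so concurrent threads overwrite each other's updates and bins come out too low.
// It is never launched from main().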
__global__ void bad_histo(unsigned int * d_bins, unsigned int * d_in, unsigned int BIN_SIZE, unsigned int IN_SIZE)
{
unsigned int myId = threadIdx.x + blockDim.x * blockIdx.x;
// checking for out-of-bounds
if (myId>=IN_SIZE)
{
return;
}
unsigned int myItem = d_in[myId];
unsigned int myBin = myItem % BIN_SIZE;
d_bins[myBin]++;
}
int main(int argc, char **argv)
{
std::ofstream myfile;
myfile.open ("par_histogram.csv");
// printf("---STARTED---\n");
unsigned int times = 10;
// Vars
unsigned int IN_SIZE;
unsigned int IN_BYTES;
unsigned int BIN_SIZE;
unsigned int BIN_BYTES;
unsigned int NUM_THREADS;
unsigned int NUM_BLOCKS;
unsigned int j;
unsigned int sum;
for(unsigned int rounds = 0; rounds<30; rounds++)
{
IN_SIZE = 1<<29;
IN_BYTES = sizeof(unsigned int) * IN_SIZE;
BIN_SIZE = 1<<rounds;
BIN_BYTES = sizeof(unsigned int) * BIN_SIZE;
NUM_THREADS = 1<<10;
NUM_BLOCKS = IN_SIZE/NUM_THREADS + ((IN_SIZE % NUM_THREADS)?1:0);
// Generate the input array on host
unsigned int * h_in = (unsigned int *)malloc(IN_BYTES);
unsigned int * h_bins = (unsigned int *)malloc(BIN_BYTES);
for (j = 0; j<IN_SIZE; j++) {h_in[j] = j;} //printf(" h_in[%d]: %d\n", j, h_in[j]);}
// Declare GPU memory pointers
/* printf("\n@@@ROUND@@@: %d\n", rounds);
printf("---IN_SIZE---: %d\n", IN_SIZE);
printf("---IN_BYTES---: %d\n", IN_BYTES);
printf("---BIN_SIZE---: %d\n", BIN_SIZE);
printf("---BIN_BYTES---: %d\n", BIN_BYTES);
printf("---THREAD_SIZE---: %d\n", NUM_THREADS);
printf("---NUM_BLOCKS---: %d\n", NUM_BLOCKS);
*/
unsigned * d_in;
unsigned * d_bins;
// Allocate GPU memory
cudaMalloc(&d_in, IN_BYTES);
// printf("---ALLOCATED D_IN---\n");
cudaMalloc(&d_bins, BIN_BYTES);
// printf("---ALLOCATED D_IN---\n");
// Transfer the arrays to the GPU
cudaMemcpy(d_in, h_in, IN_BYTES, cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// running the code on the GPU $times times
for (unsigned int k = 0; k<times; k++)
{
cudaMemset(d_bins, 0, BIN_BYTES);
simple_histo<<<NUM_BLOCKS, NUM_THREADS>>>(d_bins, d_in, BIN_SIZE, IN_SIZE);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// calculating time
float elapsedTime = .0f;
cudaEventElapsedTime(&elapsedTime, start, stop);
elapsedTime = elapsedTime / ((float) times);
// printf(" time: %.5f\n", elapsedTime);
// Copy back to HOST
cudaMemcpy(h_bins, d_bins, BIN_BYTES, cudaMemcpyDeviceToHost);
sum = 0;
for(unsigned int i = 0; i<BIN_SIZE; i++){sum += h_bins[i];}
for(unsigned int i = 0; (i<BIN_SIZE) && (i<10); i++)
{
printf("bin %d: count %d\n", i, h_bins[i]);
}
printf("%d\n", sum);
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_bins);
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(h_in);
free(h_bins);
myfile << elapsedTime << ",";
}
myfile.close();
return 0;
}
|
10d909bd27bbde8436e81f589d783a87d2c4a748.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions mixed zc -> ds
*/
#include "common_magma.h"
#define NB 64
// TODO check precision, as in zlag2c?
__global__ void
zclaswp_kernel(int n, magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int m, const magma_int_t *ipiv)
{
int ind = blockIdx.x*NB + threadIdx.x;
int newind;
magmaFloatComplex res;
if (ind < m) {
SA += ind;
ipiv += ind;
newind = ipiv[0];
for(int i=0; i < n; i++) {
res = MAGMA_C_MAKE( (float)cuCreal(A[newind+i*lda]),
(float)cuCimag(A[newind+i*lda]) );
SA[i*lda] = res;
}
}
}
__global__ void
zclaswp_inv_kernel(int n, magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int m, const magma_int_t *ipiv)
{
int ind = blockIdx.x*NB + threadIdx.x;
int newind;
magmaDoubleComplex res;
if (ind < m) {
A += ind;
ipiv += ind;
newind = ipiv[0];
for(int i=0; i < n; i++) {
res = MAGMA_Z_MAKE( (double)cuCrealf(SA[newind+i*lda]),
(double)cuCimagf(SA[newind+i*lda]) );
A[i*lda] = res;
}
}
}
/**
Purpose
-------
Row i of A is cast to single precision in row ipiv[i] of SA (incx > 0), or
row i of SA is cast to double precision in row ipiv[i] of A (incx < 0),
for 0 <= i < M.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix A.
@param[in,out]
A DOUBLE PRECISION array on the GPU, dimension (LDA,N)
On entry, the M-by-N matrix to which the row interchanges will be applied.
TODO update docs
@param[in]
lda INTEGER.
LDA specifies the leading dimension of A.
@param[in,out]
SA REAL array on the GPU, dimension (LDA,N)
On exit, the single precision, permuted matrix.
TODO update docs
@param[in]
m The number of rows to be interchanged.
@param[in]
ipiv INTEGER array on the GPU, dimension (M)
The vector of pivot indices. Row i of A is cast to single
precision in row ipiv[i] of SA, for 0 <= i < m.
@param[in]
incx INTEGER
If INCX is negative, the pivots are applied in reverse order,
otherwise in straight-forward order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zclaswp_q(
magma_int_t n, magmaDoubleComplex *A, magma_int_t lda,
magmaFloatComplex *SA, magma_int_t m,
const magma_int_t *ipiv, magma_int_t incx,
magma_queue_t queue )
{
int blocks = (m - 1)/NB + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(NB, 1, 1);
if (incx >= 0)
hipLaunchKernelGGL(( zclaswp_kernel), dim3(grid), dim3(threads), 0, queue , n, A, lda, SA, m, ipiv);
else
hipLaunchKernelGGL(( zclaswp_inv_kernel), dim3(grid), dim3(threads), 0, queue , n, A, lda, SA, m, ipiv);
}
/**
@see magmablas_zclaswp_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zclaswp(
magma_int_t n, magmaDoubleComplex *A, magma_int_t lda,
magmaFloatComplex *SA, magma_int_t m,
const magma_int_t *ipiv, magma_int_t incx )
{
magmablas_zclaswp_q( n, A, lda, SA, m, ipiv, incx, magma_stream );
}
| 10d909bd27bbde8436e81f589d783a87d2c4a748.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions mixed zc -> ds
*/
#include "common_magma.h"
#define NB 64
// TODO check precision, as in zlag2c?
__global__ void
zclaswp_kernel(int n, magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int m, const magma_int_t *ipiv)
{
int ind = blockIdx.x*NB + threadIdx.x;
int newind;
magmaFloatComplex res;
if (ind < m) {
SA += ind;
ipiv += ind;
newind = ipiv[0];
for(int i=0; i < n; i++) {
res = MAGMA_C_MAKE( (float)cuCreal(A[newind+i*lda]),
(float)cuCimag(A[newind+i*lda]) );
SA[i*lda] = res;
}
}
}
__global__ void
zclaswp_inv_kernel(int n, magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int m, const magma_int_t *ipiv)
{
int ind = blockIdx.x*NB + threadIdx.x;
int newind;
magmaDoubleComplex res;
if (ind < m) {
A += ind;
ipiv += ind;
newind = ipiv[0];
for(int i=0; i < n; i++) {
res = MAGMA_Z_MAKE( (double)cuCrealf(SA[newind+i*lda]),
(double)cuCimagf(SA[newind+i*lda]) );
A[i*lda] = res;
}
}
}
/**
Purpose
-------
Row i of A is cast to single precision in row ipiv[i] of SA (incx > 0), or
row i of SA is cast to double precision in row ipiv[i] of A (incx < 0),
for 0 <= i < M.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix A.
@param[in,out]
A DOUBLE PRECISION array on the GPU, dimension (LDA,N)
On entry, the M-by-N matrix to which the row interchanges will be applied.
TODO update docs
@param[in]
lda INTEGER.
LDA specifies the leading dimension of A.
@param[in,out]
SA REAL array on the GPU, dimension (LDA,N)
On exit, the single precision, permuted matrix.
TODO update docs
@param[in]
m The number of rows to be interchanged.
@param[in]
ipiv INTEGER array on the GPU, dimension (M)
The vector of pivot indices. Row i of A is cast to single
precision in row ipiv[i] of SA, for 0 <= i < m.
@param[in]
incx INTEGER
If INCX is negative, the pivots are applied in reverse order,
otherwise in straight-forward order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zclaswp_q(
magma_int_t n, magmaDoubleComplex *A, magma_int_t lda,
magmaFloatComplex *SA, magma_int_t m,
const magma_int_t *ipiv, magma_int_t incx,
magma_queue_t queue )
{
int blocks = (m - 1)/NB + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(NB, 1, 1);
if (incx >= 0)
zclaswp_kernel<<< grid, threads, 0, queue >>>(n, A, lda, SA, m, ipiv);
else
zclaswp_inv_kernel<<< grid, threads, 0, queue >>>(n, A, lda, SA, m, ipiv);
}
/**
@see magmablas_zclaswp_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zclaswp(
magma_int_t n, magmaDoubleComplex *A, magma_int_t lda,
magmaFloatComplex *SA, magma_int_t m,
const magma_int_t *ipiv, magma_int_t incx )
{
magmablas_zclaswp_q( n, A, lda, SA, m, ipiv, incx, magma_stream );
}
|
34b991a09b8a43e7a7af6c04274fb63d6ac317aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 4
//Radix Sorting
#include "utils.h"
#include <thrust/host_vector.h>
#include <algorithm>
#include <cstring>
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
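/* Illustrative host-side sketch of one LSB radix pass following steps 1-4 above.
NOTE: this helper (host_radix_pass_sketch) is added only as documentation of the idea; it is not
part of the assignment's GPU code and is never called below. The GPU version has to build the
histogram, scan and scatter with kernels and ping-pong between the provided buffers instead. */
static void host_radix_pass_sketch(const unsigned int* inVals, const unsigned int* inPos,
unsigned int* outVals, unsigned int* outPos,
size_t numElems, unsigned int digit)
{
unsigned int mask = 1u << digit;
// 1) histogram of the two possible digit values at this bit
size_t hist[2] = {0, 0};
for (size_t i = 0; i < numElems; ++i) {
hist[(inVals[i] & mask) >> digit]++;
}
// 2) exclusive prefix sum of the histogram: all 0-digits come before all 1-digits
size_t base[2] = {0, hist[0]};
// 3) + 4) relative offset inside each digit group, combined into the final scatter
size_t offset[2] = {0, 0};
for (size_t i = 0; i < numElems; ++i) {
unsigned int bin = (inVals[i] & mask) >> digit;
size_t dst = base[bin] + offset[bin]++;
outVals[dst] = inVals[i];
outPos[dst] = inPos[i];
}
}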
/// parallel histogram function on GPU
__global__ void hist_create(unsigned int* Hist,
unsigned int* const Vals,
unsigned int digit)
{
// S_Hist is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
//extern __shared__ unsigned int S_Hist[];
int tid = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int mask = 1 << digit;
unsigned int bin = (Vals[tid] & mask) >> digit;
atomicAdd(&(Hist[bin]), 1);
//__syncthreads();
}
/// parallel predicate on GPU
__global__ void predicate(unsigned int* const Vals,
unsigned int* Vals_saved0,
unsigned int* Vals_saved1,
unsigned int digit)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int mask = 1 << digit;
unsigned int bin = (Vals[tid] & mask) >> digit;
if (bin == 0) {
Vals_saved0[tid] = 1;
Vals_saved1[tid] = 0;
}
__syncthreads();
if (bin == 1) {
Vals_saved0[tid] = 0;
Vals_saved1[tid] = 1;
}
}
/// parallel exclusive sum on GPU
__global__ void exclus_sum(unsigned int* Vals,
unsigned int* exc_sum,
int stepsize,
int whichblk )
{
int tid = threadIdx.x + blockIdx.x*blockDim.x + whichblk*520;
unsigned int sum=0;
exc_sum[tid] = Vals[tid];
__syncthreads();
if ( (tid-stepsize) >= 0 ) {
sum = exc_sum[tid] + exc_sum[tid - stepsize];
}
else {
sum = exc_sum[tid];
}
__syncthreads();
exc_sum[tid] = sum;
}
/// parallel compaction on GPU: scatters each element to its output slot using the scanned offsets
__global__ void compact(unsigned int* d_inputVals,
unsigned int* d_inputPos,
unsigned int* d_outputVals,
unsigned int* d_outputPos,
unsigned int* exc_sum0,
unsigned int* exc_sum1,
unsigned int* predicate0,
unsigned int* Hist)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int digit = 1;
if (digit == predicate0[tid] ) {
unsigned int a = exc_sum0[tid];
d_outputVals[a] = d_inputVals[tid];
d_outputPos[a] = d_inputPos[tid];
}
__syncthreads();
digit = 0;
if ( digit == predicate0[tid] ) {
unsigned int a = exc_sum1[tid] + Hist[0];
d_outputVals[a] = d_inputVals[tid];
d_outputPos[a] = d_inputPos[tid];
}
}
/// parallel swap out to input on GPU
__global__ void swap_oi(unsigned int* inputV,
unsigned int* inputP,
unsigned int* outputV,
unsigned int* outputP)
{
int myx = threadIdx.x + blockIdx.x*blockDim.x;
int myy = threadIdx.y + blockIdx.y*blockDim.y;
int myId = myx + myy*gridDim.x*blockDim.x;
inputV[myId] = outputV[myId];
inputP[myId] = outputP[myId];
}
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
//TODO
//PUT YOUR SORT HERE
unsigned int numBins = 2;
unsigned int *d_binHistogram, *d_predicate0, *d_predicate1, *d_tempvals0, *d_tempvals1;
unsigned int h_binHistogram[numBins],h_test[numElems],h_test2[numElems], h_inputVals[numElems],h_predicate0[numElems],h_predicate1[numElems];
unsigned int n_dig=32;
checkCudaErrors(hipMalloc((void**) &d_binHistogram, numBins*sizeof(unsigned int) ) );
checkCudaErrors(hipMalloc((void**) &d_predicate0, numElems*sizeof(unsigned int) ) );
checkCudaErrors(hipMalloc((void**) &d_predicate1, numElems*sizeof(unsigned int) ) );
checkCudaErrors(hipMalloc((void**) &d_tempvals0, numElems*sizeof(unsigned int) ) );
checkCudaErrors(hipMalloc((void**) &d_tempvals1, numElems*sizeof(unsigned int) ) );
const dim3 blockSize(520 ,1, 1);
const dim3 gridSize(1, 1, 1);
//checkCudaErrors(hipMemcpy(h_inputVals, d_inputVals, numElems*sizeof(unsigned int), hipMemcpyDeviceToHost));
for (unsigned int digit = 0; digit < n_dig; ++digit) {
checkCudaErrors(hipMemset(d_binHistogram, 0, numBins*sizeof(unsigned int) ) );
checkCudaErrors(hipMemset(d_predicate0, 0, numElems*sizeof(unsigned int) ) );
checkCudaErrors(hipMemset(d_tempvals0, 0, numElems*sizeof(unsigned int) ) );
hipLaunchKernelGGL(( hist_create) , dim3(gridSize), dim3(blockSize), 0, 0, d_binHistogram, d_inputVals, digit);
//checkCudaErrors(hipMemcpy(&h_binHistogram, d_binHistogram, numBins*sizeof(unsigned int), hipMemcpyDeviceToHost));
printf("i %d \n", digit );
hipLaunchKernelGGL(( predicate) , dim3(gridSize), dim3(blockSize), 0, 0, d_inputVals,
d_predicate0,
d_predicate1,
digit);
//checkCudaErrors(hipMemcpy(h_predicate0, d_predicate0, numElems*sizeof(unsigned int), hipMemcpyDeviceToHost));
// printf("d_p %d \n", h_predicate0[22] );
// for (unsigned int j = 0; j < numElems; ++j) {
// h_predicate0[j] = 0;
// }
// unsigned int mask = 1 << digit;
// unsigned int summ=0;
//perform histogram of data & mask into bins
// for (unsigned int j = 0; j < numElems; ++j) {
// unsigned int bin = (h_inputVals[j] & mask) >> digit;
// if(bin == 0) {
// summ +=1;
// h_predicate0[j] = summ;}
// }
//printf("h_p %d ", h_predicate0[29] );
// checkCudaErrors(hipMemcpy(d_predicate0, h_predicate0, numElems*sizeof(unsigned int), hipMemcpyHostToDevice));
// checkCudaErrors(hipMemcpy(d_predicate1, h_predicate1, numElems*sizeof(unsigned int), hipMemcpyHostToDevice));
// hipDeviceSynchronize();
for(int s = 1; s <= numElems; s *= 2) {
for (int j = 0; j < 424 ; j++) {
hipLaunchKernelGGL(( exclus_sum) , dim3(gridSize), dim3(blockSize), 0, 0, d_predicate0, d_tempvals0, s, j);
hipDeviceSynchronize();
}
}
// hipLaunchKernelGGL(( exclus_sum) , dim3(gridSize), dim3(blockSize), 0, 0, d_predicate1, d_tempvals1, numElems);
checkCudaErrors(hipMemcpy(h_test, d_tempvals0, numElems*sizeof(unsigned int), hipMemcpyDeviceToHost));
//printf("d_p %d \n", h_test[22] );
// checkCudaErrors(hipMemcpy(h_test2, d_tempvals1, numElems*sizeof(unsigned int), hipMemcpyDeviceToHost));
/*
compact<<<gridSize, blockSize>>> (d_inputVals,
d_inputPos,
d_outputVals,
d_outputPos,
d_tempvals0,
d_tempvals1,
d_predicate0,
d_binHistogram);
hipDeviceSynchronize();
if (digit < (n_dig-1) ){
swap_oi<<<gridSize, blockSize>>> (d_inputVals,
d_inputPos,
d_outputVals,
d_outputPos);
}
hipDeviceSynchronize();
*/
}
checkCudaErrors(hipFree(d_binHistogram));
checkCudaErrors(hipFree(d_predicate0));
checkCudaErrors(hipFree(d_predicate1));
checkCudaErrors(hipFree(d_tempvals0));
checkCudaErrors(hipFree(d_tempvals1));
}
| 34b991a09b8a43e7a7af6c04274fb63d6ac317aa.cu | //Udacity HW 4
//Radix Sorting
#include "utils.h"
#include <thrust/host_vector.h>
#include <algorithm>
#include <cstring>
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
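/* Illustrative host-side sketch of one LSB radix pass following steps 1-4 above.
NOTE: this helper (host_radix_pass_sketch) is added only as documentation of the idea; it is not
part of the assignment's GPU code and is never called below. The GPU version has to build the
histogram, scan and scatter with kernels and ping-pong between the provided buffers instead. */
static void host_radix_pass_sketch(const unsigned int* inVals, const unsigned int* inPos,
unsigned int* outVals, unsigned int* outPos,
size_t numElems, unsigned int digit)
{
unsigned int mask = 1u << digit;
// 1) histogram of the two possible digit values at this bit
size_t hist[2] = {0, 0};
for (size_t i = 0; i < numElems; ++i) {
hist[(inVals[i] & mask) >> digit]++;
}
// 2) exclusive prefix sum of the histogram: all 0-digits come before all 1-digits
size_t base[2] = {0, hist[0]};
// 3) + 4) relative offset inside each digit group, combined into the final scatter
size_t offset[2] = {0, 0};
for (size_t i = 0; i < numElems; ++i) {
unsigned int bin = (inVals[i] & mask) >> digit;
size_t dst = base[bin] + offset[bin]++;
outVals[dst] = inVals[i];
outPos[dst] = inPos[i];
}
}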
/// parallel histogram function on GPU
__global__ void hist_create(unsigned int* Hist,
unsigned int* const Vals,
unsigned int digit)
{
// S_Hist is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
//extern __shared__ unsigned int S_Hist[];
int tid = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int mask = 1 << digit;
unsigned int bin = (Vals[tid] & mask) >> digit;
atomicAdd(&(Hist[bin]), 1);
//__syncthreads();
}
/// parallel predicate on GPU
__global__ void predicate(unsigned int* const Vals,
unsigned int* Vals_saved0,
unsigned int* Vals_saved1,
unsigned int digit)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int mask = 1 << digit;
unsigned int bin = (Vals[tid] & mask) >> digit;
if (bin == 0) {
Vals_saved0[tid] = 1;
Vals_saved1[tid] = 0;
}
__syncthreads();
if (bin == 1) {
Vals_saved0[tid] = 0;
Vals_saved1[tid] = 1;
}
}
/// parallel exclusive sum on GPU
__global__ void exclus_sum(unsigned int* Vals,
unsigned int* exc_sum,
int stepsize,
int whichblk )
{
int tid = threadIdx.x + blockIdx.x*blockDim.x + whichblk*520;
unsigned int sum=0;
exc_sum[tid] = Vals[tid];
__syncthreads();
if ( (tid-stepsize) >= 0 ) {
sum = exc_sum[tid] + exc_sum[tid - stepsize];
}
else {
sum = exc_sum[tid];
}
__syncthreads();
exc_sum[tid] = sum;
}
/// parallel compaction on GPU: scatters each element to its output slot using the scanned offsets
__global__ void compact(unsigned int* d_inputVals,
unsigned int* d_inputPos,
unsigned int* d_outputVals,
unsigned int* d_outputPos,
unsigned int* exc_sum0,
unsigned int* exc_sum1,
unsigned int* predicate0,
unsigned int* Hist)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int digit = 1;
if (digit == predicate0[tid] ) {
unsigned int a = exc_sum0[tid];
d_outputVals[a] = d_inputVals[tid];
d_outputPos[a] = d_inputPos[tid];
}
__syncthreads();
digit = 0;
if ( digit == predicate0[tid] ) {
unsigned int a = exc_sum1[tid] + Hist[0];
d_outputVals[a] = d_inputVals[tid];
d_outputPos[a] = d_inputPos[tid];
}
}
/// parallel swap out to input on GPU
__global__ void swap_oi(unsigned int* inputV,
unsigned int* inputP,
unsigned int* outputV,
unsigned int* outputP)
{
int myx = threadIdx.x + blockIdx.x*blockDim.x;
int myy = threadIdx.y + blockIdx.y*blockDim.y;
int myId = myx + myy*gridDim.x*blockDim.x;
inputV[myId] = outputV[myId];
inputP[myId] = outputP[myId];
}
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
//TODO
//PUT YOUR SORT HERE
unsigned int numBins = 2;
unsigned int *d_binHistogram, *d_predicate0, *d_predicate1, *d_tempvals0, *d_tempvals1;
unsigned int h_binHistogram[numBins],h_test[numElems],h_test2[numElems], h_inputVals[numElems],h_predicate0[numElems],h_predicate1[numElems];
unsigned int n_dig=32;
checkCudaErrors(cudaMalloc((void**) &d_binHistogram, numBins*sizeof(unsigned int) ) );
checkCudaErrors(cudaMalloc((void**) &d_predicate0, numElems*sizeof(unsigned int) ) );
checkCudaErrors(cudaMalloc((void**) &d_predicate1, numElems*sizeof(unsigned int) ) );
checkCudaErrors(cudaMalloc((void**) &d_tempvals0, numElems*sizeof(unsigned int) ) );
checkCudaErrors(cudaMalloc((void**) &d_tempvals1, numElems*sizeof(unsigned int) ) );
const dim3 blockSize(520 ,1, 1);
const dim3 gridSize(1, 1, 1);
//checkCudaErrors(cudaMemcpy(h_inputVals, d_inputVals, numElems*sizeof(unsigned int), cudaMemcpyDeviceToHost));
for (unsigned int digit = 0; digit < n_dig; ++digit) {
checkCudaErrors(cudaMemset(d_binHistogram, 0, numBins*sizeof(unsigned int) ) );
checkCudaErrors(cudaMemset(d_predicate0, 0, numElems*sizeof(unsigned int) ) );
checkCudaErrors(cudaMemset(d_tempvals0, 0, numElems*sizeof(unsigned int) ) );
hist_create <<<gridSize, blockSize>>> (d_binHistogram, d_inputVals, digit);
//checkCudaErrors(cudaMemcpy(&h_binHistogram, d_binHistogram, numBins*sizeof(unsigned int), cudaMemcpyDeviceToHost));
printf("i %d \n", digit );
predicate <<<gridSize, blockSize>>> ( d_inputVals,
d_predicate0,
d_predicate1,
digit);
//checkCudaErrors(cudaMemcpy(h_predicate0, d_predicate0, numElems*sizeof(unsigned int), cudaMemcpyDeviceToHost));
// printf("d_p %d \n", h_predicate0[22] );
// for (unsigned int j = 0; j < numElems; ++j) {
// h_predicate0[j] = 0;
// }
// unsigned int mask = 1 << digit;
// unsigned int summ=0;
//perform histogram of data & mask into bins
// for (unsigned int j = 0; j < numElems; ++j) {
// unsigned int bin = (h_inputVals[j] & mask) >> digit;
// if(bin == 0) {
// summ +=1;
// h_predicate0[j] = summ;}
// }
//printf("h_p %d ", h_predicate0[29] );
// checkCudaErrors(cudaMemcpy(d_predicate0, h_predicate0, numElems*sizeof(unsigned int), cudaMemcpyHostToDevice));
// checkCudaErrors(cudaMemcpy(d_predicate1, h_predicate1, numElems*sizeof(unsigned int), cudaMemcpyHostToDevice));
// cudaDeviceSynchronize();
for(int s = 1; s <= numElems; s *= 2) {
for (int j = 0; j < 424 ; j++) {
exclus_sum <<<gridSize, blockSize>>> (d_predicate0, d_tempvals0, s, j);
cudaDeviceSynchronize();
}
}
// exclus_sum <<<gridSize, blockSize>>> (d_predicate1, d_tempvals1, numElems);
checkCudaErrors(cudaMemcpy(h_test, d_tempvals0, numElems*sizeof(unsigned int), cudaMemcpyDeviceToHost));
//printf("d_p %d \n", h_test[22] );
// checkCudaErrors(cudaMemcpy(h_test2, d_tempvals1, numElems*sizeof(unsigned int), cudaMemcpyDeviceToHost));
/*
compact<<<gridSize, blockSize>>> (d_inputVals,
d_inputPos,
d_outputVals,
d_outputPos,
d_tempvals0,
d_tempvals1,
d_predicate0,
d_binHistogram);
cudaDeviceSynchronize();
if (digit < (n_dig-1) ){
swap_oi<<<gridSize, blockSize>>> (d_inputVals,
d_inputPos,
d_outputVals,
d_outputPos);
}
cudaDeviceSynchronize();
*/
}
checkCudaErrors(cudaFree(d_binHistogram));
checkCudaErrors(cudaFree(d_predicate0));
checkCudaErrors(cudaFree(d_predicate1));
checkCudaErrors(cudaFree(d_tempvals0));
checkCudaErrors(cudaFree(d_tempvals1));
}
|
501f449faad5903ed65f3d5802ce602406d2356d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gputimer.h"
#define NUM_THREADS 1000000
#define ARRAY_SIZE 100
#define BLOCK_WIDTH 1000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_naive(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
g[i] = g[i] + 1;
}
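// With NUM_THREADS = 1000000 and ARRAY_SIZE = 100 each element is targeted 10000 times, but the
// unsynchronized read-modify-write above races, so the naive kernel typically reports far fewer;
// the atomic version below should reach exactly 10000 per element.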
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
atomicAdd(& g[i], 1);
}
int main(int argc,char **argv)
{
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
hipMalloc((void **) &d_array, ARRAY_BYTES);
hipMemset((void *) d_array, 0, ARRAY_BYTES);
// launch the kernel - comment out one of these
timer.Start();
// Instructions: This program is needed for the next quiz
// uncomment increment_naive to measure speed and accuracy
// of non-atomic increments or uncomment increment_atomic to
// measure speed and accuracy of atomic increments
hipLaunchKernelGGL(( increment_naive), dim3(NUM_THREADS/BLOCK_WIDTH), dim3(BLOCK_WIDTH), 0, 0, d_array);
//increment_atomic<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
timer.Stop();
// copy back the array of sums from GPU and print
hipMemcpy(h_array, d_array, ARRAY_BYTES, hipMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
// free GPU memory allocation and exit
hipFree(d_array);
return 0;
}
| 501f449faad5903ed65f3d5802ce602406d2356d.cu | #include <stdio.h>
#include "gputimer.h"
#define NUM_THREADS 1000000
#define ARRAY_SIZE 100
#define BLOCK_WIDTH 1000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_naive(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
g[i] = g[i] + 1;
}
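// With NUM_THREADS = 1000000 and ARRAY_SIZE = 100 each element is targeted 10000 times, but the
// unsynchronized read-modify-write above races, so the naive kernel typically reports far fewer;
// the atomic version below should reach exactly 10000 per element.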
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
atomicAdd(& g[i], 1);
}
int main(int argc,char **argv)
{
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
cudaMalloc((void **) &d_array, ARRAY_BYTES);
cudaMemset((void *) d_array, 0, ARRAY_BYTES);
// launch the kernel - comment out one of these
timer.Start();
// Instructions: This program is needed for the next quiz
// uncomment increment_naive to measure speed and accuracy
// of non-atomic increments or uncomment increment_atomic to
// measure speed and accuracy of atomic increments
increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
//increment_atomic<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
timer.Stop();
// copy back the array of sums from GPU and print
cudaMemcpy(h_array, d_array, ARRAY_BYTES, cudaMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
// free GPU memory allocation and exit
cudaFree(d_array);
return 0;
}
|
4ae514c16fdaadfd8bc2a309b9ea663328827a01.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void solution(float* img, float* xbar, int w, int h, int nc) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x < w && y < h) {
int i;
for (int z = 0; z < nc; z++) {
i = x + w * y + w * h * z;
img[i] = xbar[i];
}
}
} | 4ae514c16fdaadfd8bc2a309b9ea663328827a01.cu | #include "includes.h"
__global__ void solution(float* img, float* xbar, int w, int h, int nc) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x < w && y < h) {
int i;
for (int z = 0; z < nc; z++) {
i = x + w * y + w * h * z;
img[i] = xbar[i];
}
}
} |
b7c2b8439e4ea867504be55431ecc78c8461cc43.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* noise_remover.cpp
*
* This program removes noise from an image based on Speckle Reducing Anisotropic Diffusion
* Y. Yu, S. Acton, Speckle reducing anisotropic diffusion,
* IEEE Transactions on Image Processing 11(11)(2002) 1260-1270 <http://people.virginia.edu/~sc5nf/01097762.pdf>
* Original implementation is Modified by Burak BASTEM
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#include <hip/hip_cooperative_groups.h>
#define BLOCK_SIZE 256
#define TILE_DIM 32
#define MATCH(s) (!strcmp(argv[ac], (s)))
// returns the current time
static const double kMicro = 1.0e-6;
double get_time() {
struct timeval TV;
struct timezone TZ;
const int RC = gettimeofday(&TV, &TZ);
if(RC == -1) {
printf("ERROR: Bad call to gettimeofday\n");
return(-1);
}
return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) );
}
namespace cg = cooperative_groups;
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template <class T>
struct SharedMemory {
__device__ inline operator T *() {
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const {
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template <>
struct SharedMemory<double> {
__device__ inline operator double *() {
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
__device__ inline operator const double *() const {
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
__global__ void warmup(){}
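// compute1: SRAD step 1. For every interior pixel it computes the four directional derivatives,
// the normalized gradient and Laplacian terms, and from them the diffusion coefficient, clamped to
// [0, 1]. std_dev[0] holds the global speckle statistic produced by the reductions + standard_dev.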
__global__ void compute1(unsigned char* image, float* diff_coef, float* std_dev, int width, int height,
float* north, float* south, float* east, float* west)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int index = row * width + col;
if(col < width - 1 && row < height - 1 && row > 0 && col > 0)
{
float north_k, south_k, west_k, east_k, image_k, deviation, coef;
image_k = image[index];
deviation = std_dev[0];
north[index] = north_k = image[index - width] - image_k;
south[index] = south_k = image[index + width] - image_k;
west[index] = west_k = image[index - 1] - image_k;
east[index] = east_k = image[index + 1] - image_k;
float gradient_square = ( north_k * north_k
+ south_k * south_k
+ west_k * west_k
+ east_k * east_k ) / (image_k * image_k);
float laplacian = (north_k + south_k + west_k + east_k) / image_k;
float num = (0.5 * gradient_square) - ((1.0 / 16.0) * (laplacian * laplacian));
float den = 1 + (.25 * laplacian);
float std_dev2 = num / (den * den);
den = (std_dev2 - deviation) / (deviation * (1 + deviation));
coef = 1.0 / (1.0 + den);
if (coef < 0) {
diff_coef[index] = 0;
} else if (coef > 1){
diff_coef[index] = 1;
} else {
diff_coef[index] = coef;
}
}
}
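// compute2: SRAD step 2. Loads a (TILE_DIM+1)^2 tile of diffusion coefficients into shared memory,
// including a one-element halo on the south/east side, and applies the explicit update
// image += 0.25 * lambda * divergence for every interior pixel.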
__global__ void compute2(unsigned char* image, float* diff_coef, float* north, float* south,
float* east, float* west, float lambda, int width, int height)
{
__shared__ float temp[TILE_DIM + 1][TILE_DIM + 1];
int ty = threadIdx.y, tx = threadIdx.x, bx = blockIdx.x, by = blockIdx.y;
int col = bx * blockDim.x + tx;
int row = by * blockDim.y + ty;
int index = row * width + col;
// bounds-checked tile and halo loads, so edge blocks never read past the end of diff_coef
if(row < height && col < width){
temp[ty][tx] = diff_coef[index];
}
if(tx == 0 && row < height && col + TILE_DIM < width){
temp[ty][TILE_DIM] = diff_coef[index + TILE_DIM];
}
if(ty == 0 && col < width && row + TILE_DIM < height){
temp[TILE_DIM][tx] = diff_coef[(TILE_DIM + row) * width + col];
}
__syncthreads();
if(row > 0 && col > 0 && row < height - 1 && col < width - 1){
float diff_coef_north = temp[ty][tx];
float diff_coef_south = temp[ty + 1][tx];
float diff_coef_west = temp[ty][tx];
float diff_coef_east = temp[ty][tx + 1];
float divergence = diff_coef_north * north[index]
+ diff_coef_south * south[index]
+ diff_coef_west * west[index]
+ diff_coef_east * east[index];
image[index] = image[index] + 0.25 * lambda * divergence;
}
}
template <class T, unsigned int blockSize, bool nIsPow2>
__global__ void reduce1(unsigned char *g_idata, T *g_odata, unsigned int n) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n) {
mySum += g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2
// sized arrays
if (nIsPow2 || i + blockSize < n) mySum += g_idata[i + blockSize];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
cg::sync(cta);
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256)) {
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
cg::sync(cta);
if ((blockSize >= 256) && (tid < 128)) {
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
cg::sync(cta);
if ((blockSize >= 128) && (tid < 64)) {
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
cg::sync(cta);
cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
if (cta.thread_rank() < 32) {
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = tile32.size() / 2; offset > 0; offset /= 2) {
mySum += tile32.shfl_down(mySum, offset);
}
}
// write result for this block to global mem
if (cta.thread_rank() == 0) g_odata[blockIdx.x] = mySum;
}
template <class T, unsigned int blockSize, bool nIsPow2>
__global__ void reduce2(unsigned char *g_idata, T *g_odata, unsigned int n) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n) {
mySum += g_idata[i] * g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2
// sized arrays
if (nIsPow2 || i + blockSize < n) mySum += g_idata[i + blockSize] * g_idata[i + blockSize];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
cg::sync(cta);
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256)) {
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
cg::sync(cta);
if ((blockSize >= 256) && (tid < 128)) {
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
cg::sync(cta);
if ((blockSize >= 128) && (tid < 64)) {
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
cg::sync(cta);
cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
if (cta.thread_rank() < 32) {
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = tile32.size() / 2; offset > 0; offset /= 2) {
mySum += tile32.shfl_down(mySum, offset);
}
}
// write result for this block to global mem
if (cta.thread_rank() == 0) g_odata[blockIdx.x] = mySum;
}
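// standard_dev: single-thread kernel that folds the per-block partial sums of the image (sums) and
// of the squared image (sums2) into mean and variance, and stores variance / mean^2 in std_dev[0]
// for compute1 to read.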
__global__ void standard_dev(float* sums, float* sums2, float* std_dev, int size, int numBlocks)
{
float sum = 0, sum2 = 0;
for(int i=0; i < numBlocks; i++){
sum += sums[i];
sum2 += sums2[i];
}
float mean = sum / size;
float variance = (sum2 / size) - mean * mean; // --- 3 floating point arithmetic operations
std_dev[0] = variance / (mean * mean);
}
extern "C" bool isPow2(unsigned int x) { return ((x & (x - 1)) == 0); }
int main(int argc, char *argv[]) {
// Part I: allocate and initialize variables
double time_0, time_1, time_2, time_3, time_4, time_5, time_6, time_7, time_8; // time variables
time_0 = get_time();
const char *filename = "input.pgm";
const char *outputname = "output.png";
int width, height, pixelWidth, n_pixels;
int n_iter = 50;
float lambda = 0.5;
float *north_deriv_dev, *south_deriv_dev, *west_deriv_dev, *east_deriv_dev; // device derivatives
float *sums, *sums2, *std_dev; // calculation variables
float *diff_coef_dev; // diffusion coefficient
unsigned char *image_dev;
time_1 = get_time();
// Part II: parse command line arguments
if(argc<2) {
printf("Usage: %s [-i < filename>] [-iter <n_iter>] [-l <lambda>] [-o <outputfilename>]\n",argv[0]);
return(-1);
}
for(int ac=1;ac<argc;ac++) {
if(MATCH("-i")) {
filename = argv[++ac];
} else if(MATCH("-iter")) {
n_iter = atoi(argv[++ac]);
} else if(MATCH("-l")) {
lambda = atof(argv[++ac]);
} else if(MATCH("-o")) {
outputname = argv[++ac];
} else {
printf("Usage: %s [-i < filename>] [-iter <n_iter>] [-l <lambda>] [-o <outputfilename>]\n",argv[0]);
return(-1);
}
}
time_2 = get_time();
// Part III: read image
printf("Reading image...\n");
unsigned char *image = stbi_load(filename, &width, &height, &pixelWidth, 0);
if (!image) {
fprintf(stderr, "Couldn't load image.\n");
return (-1);
}
printf("Image Read. Width : %d, Height : %d, nComp: %d\n",width,height,pixelWidth);
n_pixels = height * width;
time_3 = get_time();
// Part IV: allocate variables
hipMalloc((void**)&north_deriv_dev, sizeof(float) * n_pixels);
hipMalloc((void**)&south_deriv_dev, sizeof(float) * n_pixels);
hipMalloc((void**)&west_deriv_dev, sizeof(float) * n_pixels);
hipMalloc((void**)&east_deriv_dev, sizeof(float) * n_pixels);
hipMalloc((void**)&diff_coef_dev, sizeof(float) * n_pixels);
hipMalloc((void**)&image_dev, sizeof(unsigned char) * n_pixels);
hipMemcpy(image_dev, image, sizeof(unsigned char) * n_pixels, hipMemcpyHostToDevice);
const int reduction_blocks = n_pixels/BLOCK_SIZE + (n_pixels % BLOCK_SIZE == 0 ? 0 : 1);
const int block_row = height/TILE_DIM + (height % TILE_DIM == 0 ? 0 : 1);
const int block_col = width/TILE_DIM + (width % TILE_DIM == 0 ? 0 : 1);
const dim3 blocks(block_col, block_row), threads(TILE_DIM,TILE_DIM);
hipMalloc((void**)&sums, sizeof(float)*reduction_blocks);
hipMalloc((void**)&sums2, sizeof(float)*reduction_blocks);
hipMalloc((void**)&std_dev, sizeof(float));
int numblocks = reduction_blocks/2 + (reduction_blocks % 2 == 0 ? 0 : 1);
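// Each reduce1/reduce2 block consumes 2 * BLOCK_SIZE input elements, so only the first
// ~reduction_blocks/2 blocks ever see data; the remaining blocks write a zero partial sum, and
// standard_dev only needs to add up the first numblocks entries of sums/sums2.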
bool pow2 = isPow2(n_pixels);
// warm up kernel
hipLaunchKernelGGL(( warmup), dim3(blocks),dim3(threads), 0, 0, );
time_4 = get_time();
// Part V: compute --- n_iter * (3 * height * width + 42 * (height-1) * (width-1) + 6) floating point arithmetic operations in totaL
for (int iter = 0; iter < n_iter; iter++) {
if(pow2){
hipLaunchKernelGGL(( reduce1<float, BLOCK_SIZE, true>), dim3(reduction_blocks), dim3(BLOCK_SIZE), BLOCK_SIZE * sizeof(float), 0, image_dev, sums, n_pixels);
hipLaunchKernelGGL(( reduce2<float, BLOCK_SIZE, true>), dim3(reduction_blocks), dim3(BLOCK_SIZE), BLOCK_SIZE * sizeof(float), 0, image_dev, sums2, n_pixels);
} else {
hipLaunchKernelGGL(( reduce1<float, BLOCK_SIZE, false>), dim3(reduction_blocks), dim3(BLOCK_SIZE), BLOCK_SIZE * sizeof(float), 0, image_dev, sums, n_pixels);
hipLaunchKernelGGL(( reduce2<float, BLOCK_SIZE, false>), dim3(reduction_blocks), dim3(BLOCK_SIZE), BLOCK_SIZE * sizeof(float), 0, image_dev, sums2, n_pixels);
}
hipLaunchKernelGGL(( standard_dev), dim3(1),dim3(1), 0, 0, sums, sums2, std_dev, n_pixels, numblocks);
hipLaunchKernelGGL(( compute1), dim3(blocks), dim3(threads), 0, 0, image_dev, diff_coef_dev, std_dev, width, height,
north_deriv_dev, south_deriv_dev, east_deriv_dev, west_deriv_dev);
hipLaunchKernelGGL(( compute2), dim3(blocks), dim3(threads), 0, 0, image_dev, diff_coef_dev, north_deriv_dev, south_deriv_dev,
east_deriv_dev, west_deriv_dev, lambda, width, height);
hipDeviceSynchronize();
}
time_5 = get_time();
// Part VI: write image to file
hipMemcpy(image, image_dev, sizeof(unsigned char)*n_pixels, hipMemcpyDeviceToHost);
stbi_write_png(outputname, width, height, pixelWidth, image, 0);
time_6 = get_time();
// Part VII: get average of sum of pixels for testing and calculate GFLOPS
// FOR VALIDATION - DO NOT PARALLELIZE
float test = 0;
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
test += image[i * width + j];
}
}
test /= n_pixels;
float gflops = (float) (n_iter * 1E-9 * (3 * height * width + 42 * (height-1) * (width-1) + 6)) / (time_5 - time_4);
time_7 = get_time();
// Part VIII: deallocate variables
stbi_image_free(image);
hipFree(north_deriv_dev);
hipFree(south_deriv_dev);
hipFree(east_deriv_dev);
hipFree(west_deriv_dev);
hipFree(diff_coef_dev);
hipFree(image_dev);
hipFree(std_dev);
hipFree(sums);
hipFree(sums2);
time_8 = get_time();
// print
printf("Time spent in different stages of the application:\n");
printf("%9.6f s => Part I: allocate and initialize variables\n", (time_1 - time_0));
printf("%9.6f s => Part II: parse command line arguments\n", (time_2 - time_1));
printf("%9.6f s => Part III: read image\n", (time_3 - time_2));
printf("%9.6f s => Part IV: allocate variables\n", (time_4 - time_3));
printf("%9.6f s => Part V: compute\n", (time_5 - time_4));
printf("%9.6f s => Part VI: write image to file\n", (time_6 - time_5));
printf("%9.6f s => Part VII: get average of sum of pixels for testing and calculate GFLOPS\n", (time_7 - time_6));
printf("%9.6f s => Part VIII: deallocate variables\n", (time_7 - time_6));
printf("Total time: %9.6f s\n", (time_8 - time_0));
printf("Average of sum of pixels: %9.6f\n", test);
printf("GFLOPS: %f\n", gflops);
return 0;
}
| b7c2b8439e4ea867504be55431ecc78c8461cc43.cu | /*
* noise_remover.cpp
*
* This program removes noise from an image based on Speckle Reducing Anisotropic Diffusion
* Y. Yu, S. Acton, Speckle reducing anisotropic diffusion,
* IEEE Transactions on Image Processing 11(11)(2002) 1260-1270 <http://people.virginia.edu/~sc5nf/01097762.pdf>
* Original implementation is Modified by Burak BASTEM
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#include <cooperative_groups.h>
#define BLOCK_SIZE 256
#define TILE_DIM 32
#define MATCH(s) (!strcmp(argv[ac], (s)))
// returns the current time
static const double kMicro = 1.0e-6;
double get_time() {
struct timeval TV;
struct timezone TZ;
const int RC = gettimeofday(&TV, &TZ);
if(RC == -1) {
printf("ERROR: Bad call to gettimeofday\n");
return(-1);
}
return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) );
}
namespace cg = cooperative_groups;
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template <class T>
struct SharedMemory {
__device__ inline operator T *() {
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const {
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template <>
struct SharedMemory<double> {
__device__ inline operator double *() {
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
__device__ inline operator const double *() const {
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
__global__ void warmup(){}
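// compute1: SRAD step 1. For every interior pixel it computes the four directional derivatives,
// the normalized gradient and Laplacian terms, and from them the diffusion coefficient, clamped to
// [0, 1]. std_dev[0] holds the global speckle statistic produced by the reductions + standard_dev.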
__global__ void compute1(unsigned char* image, float* diff_coef, float* std_dev, int width, int height,
float* north, float* south, float* east, float* west)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int index = row * width + col;
if(col < width - 1 && row < height - 1 && row > 0 && col > 0)
{
float north_k, south_k, west_k, east_k, image_k, deviation, coef;
image_k = image[index];
deviation = std_dev[0];
north[index] = north_k = image[index - width] - image_k;
south[index] = south_k = image[index + width] - image_k;
west[index] = west_k = image[index - 1] - image_k;
east[index] = east_k = image[index + 1] - image_k;
float gradient_square = ( north_k * north_k
+ south_k * south_k
+ west_k * west_k
+ east_k * east_k ) / (image_k * image_k);
float laplacian = (north_k + south_k + west_k + east_k) / image_k;
float num = (0.5 * gradient_square) - ((1.0 / 16.0) * (laplacian * laplacian));
float den = 1 + (.25 * laplacian);
float std_dev2 = num / (den * den);
den = (std_dev2 - deviation) / (deviation * (1 + deviation));
coef = 1.0 / (1.0 + den);
if (coef < 0) {
diff_coef[index] = 0;
} else if (coef > 1){
diff_coef[index] = 1;
} else {
diff_coef[index] = coef;
}
}
}
__global__ void compute2(unsigned char* image, float* diff_coef, float* north, float* south,
float* east, float* west, float lambda, int width, int height)
{
__shared__ float temp[TILE_DIM + 1][TILE_DIM + 1];
int ty = threadIdx.y, tx = threadIdx.x, bx = blockIdx.x, by = blockIdx.y;
int col = bx * blockDim.x + tx;
int row = by * blockDim.y + ty;
int index = row * width + col;
temp[ty][tx] = diff_coef[index];
if(tx == 0){
temp[ty][TILE_DIM] = diff_coef[index + TILE_DIM];
}
if(ty == 0){
temp[TILE_DIM][tx] = diff_coef[(TILE_DIM + row) * width + col];
}
__syncthreads();
if(row > 0 && col > 0 && row < height - 1 && col < width - 1){
float diff_coef_north = temp[ty][tx];
float diff_coef_south = temp[ty + 1][tx];
float diff_coef_west = temp[ty][tx];
float diff_coef_east = temp[ty][tx + 1];
float divergence = diff_coef_north * north[index]
+ diff_coef_south * south[index]
+ diff_coef_west * west[index]
+ diff_coef_east * east[index];
image[index] = image[index] + 0.25 * lambda * divergence;
}
}
template <class T, unsigned int blockSize, bool nIsPow2>
__global__ void reduce1(unsigned char *g_idata, T *g_odata, unsigned int n) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n) {
mySum += g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2
// sized arrays
if (nIsPow2 || i + blockSize < n) mySum += g_idata[i + blockSize];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
cg::sync(cta);
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256)) {
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
cg::sync(cta);
if ((blockSize >= 256) && (tid < 128)) {
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
cg::sync(cta);
if ((blockSize >= 128) && (tid < 64)) {
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
cg::sync(cta);
cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
if (cta.thread_rank() < 32) {
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = tile32.size() / 2; offset > 0; offset /= 2) {
mySum += tile32.shfl_down(mySum, offset);
}
}
// write result for this block to global mem
if (cta.thread_rank() == 0) g_odata[blockIdx.x] = mySum;
}
template <class T, unsigned int blockSize, bool nIsPow2>
__global__ void reduce2(unsigned char *g_idata, T *g_odata, unsigned int n) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n) {
mySum += g_idata[i] * g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2
// sized arrays
if (nIsPow2 || i + blockSize < n) mySum += g_idata[i + blockSize] * g_idata[i + blockSize];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
cg::sync(cta);
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256)) {
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
cg::sync(cta);
if ((blockSize >= 256) && (tid < 128)) {
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
cg::sync(cta);
if ((blockSize >= 128) && (tid < 64)) {
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
cg::sync(cta);
cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
if (cta.thread_rank() < 32) {
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = tile32.size() / 2; offset > 0; offset /= 2) {
mySum += tile32.shfl_down(mySum, offset);
}
}
// write result for this block to global mem
if (cta.thread_rank() == 0) g_odata[blockIdx.x] = mySum;
}
__global__ void standard_dev(float* sums, float* sums2, float* std_dev, int size, int numBlocks)
{
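	// serial final reduction: folds the per-block partial sums into the image-wide mean
	// and variance; std_dev[0] ends up holding variance / mean^2 (the squared coefficient
	// of variation that compute1 reads back as its reference statistic)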
float sum = 0, sum2 = 0;
for(int i=0; i < numBlocks; i++){
sum += sums[i];
sum2 += sums2[i];
}
float mean = sum / size;
float variance = (sum2 / size) - mean * mean; // --- 3 floating point arithmetic operations
std_dev[0] = variance / (mean * mean);
}
extern "C" bool isPow2(unsigned int x) { return ((x & (x - 1)) == 0); }
int main(int argc, char *argv[]) {
// Part I: allocate and initialize variables
double time_0, time_1, time_2, time_3, time_4, time_5, time_6, time_7, time_8; // time variables
time_0 = get_time();
const char *filename = "input.pgm";
const char *outputname = "output.png";
int width, height, pixelWidth, n_pixels;
int n_iter = 50;
float lambda = 0.5;
float *north_deriv_dev, *south_deriv_dev, *west_deriv_dev, *east_deriv_dev; // device derivatives
float *sums, *sums2, *std_dev; // calculation variables
float *diff_coef_dev; // diffusion coefficient
unsigned char *image_dev;
time_1 = get_time();
// Part II: parse command line arguments
if(argc<2) {
printf("Usage: %s [-i < filename>] [-iter <n_iter>] [-l <lambda>] [-o <outputfilename>]\n",argv[0]);
return(-1);
}
for(int ac=1;ac<argc;ac++) {
if(MATCH("-i")) {
filename = argv[++ac];
} else if(MATCH("-iter")) {
n_iter = atoi(argv[++ac]);
} else if(MATCH("-l")) {
lambda = atof(argv[++ac]);
} else if(MATCH("-o")) {
outputname = argv[++ac];
} else {
printf("Usage: %s [-i < filename>] [-iter <n_iter>] [-l <lambda>] [-o <outputfilename>]\n",argv[0]);
return(-1);
}
}
time_2 = get_time();
// Part III: read image
printf("Reading image...\n");
unsigned char *image = stbi_load(filename, &width, &height, &pixelWidth, 0);
if (!image) {
fprintf(stderr, "Couldn't load image.\n");
return (-1);
}
printf("Image Read. Width : %d, Height : %d, nComp: %d\n",width,height,pixelWidth);
n_pixels = height * width;
time_3 = get_time();
// Part IV: allocate variables
cudaMalloc((void**)&north_deriv_dev, sizeof(float) * n_pixels);
cudaMalloc((void**)&south_deriv_dev, sizeof(float) * n_pixels);
cudaMalloc((void**)&west_deriv_dev, sizeof(float) * n_pixels);
cudaMalloc((void**)&east_deriv_dev, sizeof(float) * n_pixels);
cudaMalloc((void**)&diff_coef_dev, sizeof(float) * n_pixels);
cudaMalloc((void**)&image_dev, sizeof(unsigned char) * n_pixels);
cudaMemcpy(image_dev, image, sizeof(unsigned char) * n_pixels, cudaMemcpyHostToDevice);
const int reduction_blocks = n_pixels/BLOCK_SIZE + (n_pixels % BLOCK_SIZE == 0 ? 0 : 1);
const int block_row = height/TILE_DIM + (height % TILE_DIM == 0 ? 0 : 1);
const int block_col = width/TILE_DIM + (width % TILE_DIM == 0 ? 0 : 1);
const dim3 blocks(block_col, block_row), threads(TILE_DIM,TILE_DIM);
cudaMalloc((void**)&sums, sizeof(float)*reduction_blocks);
cudaMalloc((void**)&sums2, sizeof(float)*reduction_blocks);
cudaMalloc((void**)&std_dev, sizeof(float));
int numblocks = reduction_blocks/2 + (reduction_blocks % 2 == 0 ? 0 : 1);
bool pow2 = isPow2(n_pixels);
// warm up kernel
warmup<<<blocks,threads>>>();
time_4 = get_time();
	// Part V: compute --- n_iter * (3 * height * width + 42 * (height-1) * (width-1) + 6) floating point arithmetic operations in total
for (int iter = 0; iter < n_iter; iter++) {
if(pow2){
reduce1<float, BLOCK_SIZE, true><<<reduction_blocks, BLOCK_SIZE, BLOCK_SIZE * sizeof(float)>>>(image_dev, sums, n_pixels);
reduce2<float, BLOCK_SIZE, true><<<reduction_blocks, BLOCK_SIZE, BLOCK_SIZE * sizeof(float)>>>(image_dev, sums2, n_pixels);
} else {
reduce1<float, BLOCK_SIZE, false><<<reduction_blocks, BLOCK_SIZE, BLOCK_SIZE * sizeof(float)>>>(image_dev, sums, n_pixels);
reduce2<float, BLOCK_SIZE, false><<<reduction_blocks, BLOCK_SIZE, BLOCK_SIZE * sizeof(float)>>>(image_dev, sums2, n_pixels);
}
standard_dev<<<1,1>>>(sums, sums2, std_dev, n_pixels, numblocks);
compute1<<<blocks, threads>>>(image_dev, diff_coef_dev, std_dev, width, height,
north_deriv_dev, south_deriv_dev, east_deriv_dev, west_deriv_dev);
compute2<<<blocks, threads>>>(image_dev, diff_coef_dev, north_deriv_dev, south_deriv_dev,
east_deriv_dev, west_deriv_dev, lambda, width, height);
cudaDeviceSynchronize();
}
time_5 = get_time();
// Part VI: write image to file
cudaMemcpy(image, image_dev, sizeof(unsigned char)*n_pixels, cudaMemcpyDeviceToHost);
stbi_write_png(outputname, width, height, pixelWidth, image, 0);
time_6 = get_time();
// Part VII: get average of sum of pixels for testing and calculate GFLOPS
// FOR VALIDATION - DO NOT PARALLELIZE
float test = 0;
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
test += image[i * width + j];
}
}
test /= n_pixels;
float gflops = (float) (n_iter * 1E-9 * (3 * height * width + 42 * (height-1) * (width-1) + 6)) / (time_5 - time_4);
time_7 = get_time();
	// Part VIII: deallocate variables
stbi_image_free(image);
cudaFree(north_deriv_dev);
cudaFree(south_deriv_dev);
cudaFree(east_deriv_dev);
cudaFree(west_deriv_dev);
cudaFree(diff_coef_dev);
cudaFree(image_dev);
cudaFree(std_dev);
cudaFree(sums);
cudaFree(sums2);
time_8 = get_time();
// print
printf("Time spent in different stages of the application:\n");
printf("%9.6f s => Part I: allocate and initialize variables\n", (time_1 - time_0));
printf("%9.6f s => Part II: parse command line arguments\n", (time_2 - time_1));
printf("%9.6f s => Part III: read image\n", (time_3 - time_2));
printf("%9.6f s => Part IV: allocate variables\n", (time_4 - time_3));
printf("%9.6f s => Part V: compute\n", (time_5 - time_4));
printf("%9.6f s => Part VI: write image to file\n", (time_6 - time_5));
printf("%9.6f s => Part VII: get average of sum of pixels for testing and calculate GFLOPS\n", (time_7 - time_6));
printf("%9.6f s => Part VIII: deallocate variables\n", (time_7 - time_6));
printf("Total time: %9.6f s\n", (time_8 - time_0));
printf("Average of sum of pixels: %9.6f\n", test);
printf("GFLOPS: %f\n", gflops);
return 0;
}
|
18a94a7c723d38c2b2fc2c811824fb194062d4b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "object/geometry/plane.hpp"
#include <cfloat>
using namespace px;
BasePlane::BasePlane(Point const &pos,
Direction const &norm_vec)
: _pos(pos), _dev_obj(nullptr)
{
setNormal(norm_vec);
}
PX_CUDA_CALLABLE
GeometryObj *BasePlane::hitCheck(void * const &obj,
Ray const &ray,
PREC const &t_start,
PREC const &t_end,
PREC &hit_at)
{
auto o = reinterpret_cast<BasePlane*>(obj);
auto tmp = (o->_p_dot_n - ray.original.dot(o->_norm)) / ray.direction.dot(o->_norm);
return (tmp > t_start && tmp < t_end) ? (hit_at = tmp, o->_dev_obj) : nullptr;
}
PX_CUDA_CALLABLE
Direction BasePlane::normalVec(void * const &obj,
PREC const &x, PREC const &y, PREC const &z,
bool &double_face)
{
double_face = true;
return reinterpret_cast<BasePlane*>(obj)->_norm;
}
PX_CUDA_CALLABLE
Vec3<PREC> BasePlane::getTextureCoord(void * const &obj,
PREC const &x, PREC const &y, PREC const &z)
{
auto o = reinterpret_cast<BasePlane*>(obj);
return {x - o->_pos.x,
o->_norm.y*(z - o->_pos.z)-o->_norm.z*(y - o->_pos.y),
(x - o->_pos.x)*o->_norm.x + (y - o->_pos.y)*o->_norm.y + (z - o->_pos.z)*o->_norm.z};
}
void BasePlane::setPos(Point const &position)
{
_pos = position;
_p_dot_n = position.dot(_norm);
}
void BasePlane::setNormal(Direction const &norm_vec)
{
_norm = norm_vec;
_p_dot_n = _pos.dot(norm_vec);
}
std::shared_ptr<BaseGeometry> Plane::create(Point const &position,
Direction const &norm_vec,
std::shared_ptr<BaseMaterial> const &material,
std::shared_ptr<Transformation> const &trans)
{
return std::shared_ptr<BaseGeometry>(new Plane(position, norm_vec, material, trans));
}
Plane::Plane(Point const &position,
Direction const &norm_vec,
std::shared_ptr<BaseMaterial> const &material,
std::shared_ptr<Transformation> const &trans)
: BaseGeometry(material, trans, 4),
_obj(new BasePlane(position, norm_vec)),
_gpu_obj(nullptr), _need_upload(true)
{
_obj->_dev_obj = reinterpret_cast<GeometryObj*>(this);
_updateVertices();
}
Plane::~Plane()
{
delete _obj;
#ifdef USE_ROCM
clearGpuData();
#endif
}
#ifdef USE_ROCM
__device__ fnHit_t __fn_hit_plane = BasePlane::hitCheck;
__device__ fnNormal_t __fn_normal_plane = BasePlane::normalVec;
__device__ fnTextureCoord_t __fn_texture_coord_plane = BasePlane::getTextureCoord;
#endif
void Plane::up2Gpu()
{
#ifdef USE_ROCM
static fnHit_t fn_hit_h = nullptr;
static fnNormal_t fn_normal_h;
static fnTextureCoord_t fn_texture_coord_h;
if (_need_upload)
{
if (dev_ptr == nullptr)
{
PX_CUDA_CHECK(hipMalloc(&_gpu_obj, sizeof(BasePlane)));
PX_CUDA_CHECK(hipMalloc(&dev_ptr, sizeof(GeometryObj)));
}
if (fn_hit_h == nullptr)
{
PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_hit_h, __fn_hit_plane, sizeof(fnHit_t)));
PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_normal_h, __fn_normal_plane, sizeof(fnNormal_t)));
PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_texture_coord_h, __fn_texture_coord_plane, sizeof(fnTextureCoord_t)));
}
if (mat != nullptr)
mat->up2Gpu();
if (trans != nullptr)
trans->up2Gpu();
_obj->_dev_obj = dev_ptr;
PX_CUDA_CHECK(hipMemcpy(_gpu_obj, _obj, sizeof(BasePlane), hipMemcpyHostToDevice));
_obj->_dev_obj = reinterpret_cast<GeometryObj*>(this);
GeometryObj tmp(_gpu_obj, fn_hit_h, fn_normal_h, fn_texture_coord_h,
mat == nullptr ? nullptr : mat->devPtr(),
trans == nullptr ? nullptr : trans->devPtr());
PX_CUDA_CHECK(hipMemcpy(dev_ptr, &tmp, sizeof(GeometryObj),
hipMemcpyHostToDevice))
_need_upload = false;
}
#endif
}
void Plane::clearGpuData()
{
#ifdef USE_ROCM
BaseGeometry::clearGpuData();
if (_gpu_obj != nullptr)
{
PX_CUDA_CHECK(hipFree(_gpu_obj));
_gpu_obj = nullptr;
}
_need_upload = true;
#endif
}
void Plane::setPos(Point const &position)
{
_obj->setPos(position);
#ifdef USE_ROCM
_need_upload = true;
#endif
}
void Plane::setNormal(Direction const &norm_vec)
{
_obj->setNormal(norm_vec);
_updateVertices();
#ifdef USE_ROCM
_need_upload = true;
#endif
}
void Plane::_updateVertices()
{
if ((_obj->_norm.x == 1 || _obj->_norm.x == -1) && _obj->_norm.y == 0 && _obj->_norm.z == 0)
{
raw_vertices[0].x = 0;
raw_vertices[0].y = -FLT_MAX;
raw_vertices[0].z = -FLT_MAX;
raw_vertices[1].x = 0;
raw_vertices[1].y = FLT_MAX;
raw_vertices[1].z = FLT_MAX;
raw_vertices[2].x = 0;
raw_vertices[2].y = -FLT_MAX;
raw_vertices[2].z = FLT_MAX;
raw_vertices[3].x = 0;
raw_vertices[3].y = FLT_MAX;
raw_vertices[3].z = -FLT_MAX;
}
else if (_obj->_norm.x == 0 && (_obj->_norm.y == 1 || _obj->_norm.y == -1) && _obj->_norm.z == 0)
{
raw_vertices[0].x = -FLT_MAX;
raw_vertices[0].y = 0;
raw_vertices[0].z = -FLT_MAX;
raw_vertices[1].x = FLT_MAX;
raw_vertices[1].y = 0;
raw_vertices[1].z = FLT_MAX;
raw_vertices[2].x = -FLT_MAX;
raw_vertices[2].y = 0;
raw_vertices[2].z = FLT_MAX;
raw_vertices[3].x = FLT_MAX;
raw_vertices[3].y = 0;
raw_vertices[3].z = -FLT_MAX;
}
else if (_obj->_norm.x == 0 && _obj->_norm.y == 0 && (_obj->_norm.z == 1 || _obj->_norm.z == -1))
{
raw_vertices[0].x = -FLT_MAX;
raw_vertices[0].y = -FLT_MAX;
raw_vertices[0].z = 0;
raw_vertices[1].x = FLT_MAX;
raw_vertices[1].y = FLT_MAX;
raw_vertices[1].z = 0;
raw_vertices[2].x = -FLT_MAX;
raw_vertices[2].y = FLT_MAX;
raw_vertices[2].z = 0;
raw_vertices[3].x = FLT_MAX;
raw_vertices[3].y = -FLT_MAX;
raw_vertices[3].z = 0;
}
else if (_obj->_norm.x == 0 && _obj->_norm.y == 0 && _obj->_norm.z == 0)
{
raw_vertices[0].x = 0;
raw_vertices[0].y = 0;
raw_vertices[0].z = 0;
raw_vertices[1].x = 0;
raw_vertices[1].y = 0;
raw_vertices[1].z = 0;
raw_vertices[2].x = 0;
raw_vertices[2].y = 0;
raw_vertices[2].z = 0;
raw_vertices[3].x = 0;
raw_vertices[3].y = 0;
raw_vertices[3].z = 0;
}
else
{
raw_vertices[0].x = -FLT_MAX;
raw_vertices[0].y = -FLT_MAX;
raw_vertices[0].z = -FLT_MAX;
raw_vertices[1].x = FLT_MAX;
raw_vertices[1].y = FLT_MAX;
raw_vertices[1].z = FLT_MAX;
raw_vertices[2].x = -FLT_MAX;
raw_vertices[2].y = FLT_MAX;
raw_vertices[2].z = FLT_MAX;
raw_vertices[3].x = FLT_MAX;
raw_vertices[3].y = -FLT_MAX;
raw_vertices[3].z = -FLT_MAX;
}
}
Vec3<PREC> Plane::getTextureCoord(PREC const &x,
PREC const &y,
PREC const &z) const
{
return BasePlane::getTextureCoord(_obj, x, y, z);
}
const BaseGeometry *Plane::hitCheck(Ray const &ray,
PREC const &t_start,
PREC const &t_end,
PREC &hit_at) const
{
return BasePlane::hitCheck(_obj, ray, t_start, t_end, hit_at) ? this : nullptr;
}
Direction Plane::normalVec(PREC const &x, PREC const &y,
PREC const &z,
bool &double_face) const
{
return BasePlane::normalVec(_obj, x, y, z, double_face);
}
| 18a94a7c723d38c2b2fc2c811824fb194062d4b3.cu | #include "object/geometry/plane.hpp"
#include <cfloat>
using namespace px;
BasePlane::BasePlane(Point const &pos,
Direction const &norm_vec)
: _pos(pos), _dev_obj(nullptr)
{
setNormal(norm_vec);
}
PX_CUDA_CALLABLE
GeometryObj *BasePlane::hitCheck(void * const &obj,
Ray const &ray,
PREC const &t_start,
PREC const &t_end,
PREC &hit_at)
{
auto o = reinterpret_cast<BasePlane*>(obj);
auto tmp = (o->_p_dot_n - ray.original.dot(o->_norm)) / ray.direction.dot(o->_norm);
return (tmp > t_start && tmp < t_end) ? (hit_at = tmp, o->_dev_obj) : nullptr;
}
PX_CUDA_CALLABLE
Direction BasePlane::normalVec(void * const &obj,
PREC const &x, PREC const &y, PREC const &z,
bool &double_face)
{
double_face = true;
return reinterpret_cast<BasePlane*>(obj)->_norm;
}
PX_CUDA_CALLABLE
Vec3<PREC> BasePlane::getTextureCoord(void * const &obj,
PREC const &x, PREC const &y, PREC const &z)
{
auto o = reinterpret_cast<BasePlane*>(obj);
return {x - o->_pos.x,
o->_norm.y*(z - o->_pos.z)-o->_norm.z*(y - o->_pos.y),
(x - o->_pos.x)*o->_norm.x + (y - o->_pos.y)*o->_norm.y + (z - o->_pos.z)*o->_norm.z};
}
void BasePlane::setPos(Point const &position)
{
_pos = position;
_p_dot_n = position.dot(_norm);
}
void BasePlane::setNormal(Direction const &norm_vec)
{
_norm = norm_vec;
_p_dot_n = _pos.dot(norm_vec);
}
std::shared_ptr<BaseGeometry> Plane::create(Point const &position,
Direction const &norm_vec,
std::shared_ptr<BaseMaterial> const &material,
std::shared_ptr<Transformation> const &trans)
{
return std::shared_ptr<BaseGeometry>(new Plane(position, norm_vec, material, trans));
}
Plane::Plane(Point const &position,
Direction const &norm_vec,
std::shared_ptr<BaseMaterial> const &material,
std::shared_ptr<Transformation> const &trans)
: BaseGeometry(material, trans, 4),
_obj(new BasePlane(position, norm_vec)),
_gpu_obj(nullptr), _need_upload(true)
{
_obj->_dev_obj = reinterpret_cast<GeometryObj*>(this);
_updateVertices();
}
Plane::~Plane()
{
delete _obj;
#ifdef USE_CUDA
clearGpuData();
#endif
}
#ifdef USE_CUDA
__device__ fnHit_t __fn_hit_plane = BasePlane::hitCheck;
__device__ fnNormal_t __fn_normal_plane = BasePlane::normalVec;
__device__ fnTextureCoord_t __fn_texture_coord_plane = BasePlane::getTextureCoord;
#endif
void Plane::up2Gpu()
{
#ifdef USE_CUDA
static fnHit_t fn_hit_h = nullptr;
static fnNormal_t fn_normal_h;
static fnTextureCoord_t fn_texture_coord_h;
if (_need_upload)
{
if (dev_ptr == nullptr)
{
PX_CUDA_CHECK(cudaMalloc(&_gpu_obj, sizeof(BasePlane)));
PX_CUDA_CHECK(cudaMalloc(&dev_ptr, sizeof(GeometryObj)));
}
if (fn_hit_h == nullptr)
{
PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_hit_h, __fn_hit_plane, sizeof(fnHit_t)));
PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_normal_h, __fn_normal_plane, sizeof(fnNormal_t)));
PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_texture_coord_h, __fn_texture_coord_plane, sizeof(fnTextureCoord_t)));
}
if (mat != nullptr)
mat->up2Gpu();
if (trans != nullptr)
trans->up2Gpu();
_obj->_dev_obj = dev_ptr;
PX_CUDA_CHECK(cudaMemcpy(_gpu_obj, _obj, sizeof(BasePlane), cudaMemcpyHostToDevice));
_obj->_dev_obj = reinterpret_cast<GeometryObj*>(this);
GeometryObj tmp(_gpu_obj, fn_hit_h, fn_normal_h, fn_texture_coord_h,
mat == nullptr ? nullptr : mat->devPtr(),
trans == nullptr ? nullptr : trans->devPtr());
PX_CUDA_CHECK(cudaMemcpy(dev_ptr, &tmp, sizeof(GeometryObj),
cudaMemcpyHostToDevice))
_need_upload = false;
}
#endif
}
void Plane::clearGpuData()
{
#ifdef USE_CUDA
BaseGeometry::clearGpuData();
if (_gpu_obj != nullptr)
{
PX_CUDA_CHECK(cudaFree(_gpu_obj));
_gpu_obj = nullptr;
}
_need_upload = true;
#endif
}
void Plane::setPos(Point const &position)
{
_obj->setPos(position);
#ifdef USE_CUDA
_need_upload = true;
#endif
}
void Plane::setNormal(Direction const &norm_vec)
{
_obj->setNormal(norm_vec);
_updateVertices();
#ifdef USE_CUDA
_need_upload = true;
#endif
}
void Plane::_updateVertices()
{
if ((_obj->_norm.x == 1 || _obj->_norm.x == -1) && _obj->_norm.y == 0 && _obj->_norm.z == 0)
{
raw_vertices[0].x = 0;
raw_vertices[0].y = -FLT_MAX;
raw_vertices[0].z = -FLT_MAX;
raw_vertices[1].x = 0;
raw_vertices[1].y = FLT_MAX;
raw_vertices[1].z = FLT_MAX;
raw_vertices[2].x = 0;
raw_vertices[2].y = -FLT_MAX;
raw_vertices[2].z = FLT_MAX;
raw_vertices[3].x = 0;
raw_vertices[3].y = FLT_MAX;
raw_vertices[3].z = -FLT_MAX;
}
else if (_obj->_norm.x == 0 && (_obj->_norm.y == 1 || _obj->_norm.y == -1) && _obj->_norm.z == 0)
{
raw_vertices[0].x = -FLT_MAX;
raw_vertices[0].y = 0;
raw_vertices[0].z = -FLT_MAX;
raw_vertices[1].x = FLT_MAX;
raw_vertices[1].y = 0;
raw_vertices[1].z = FLT_MAX;
raw_vertices[2].x = -FLT_MAX;
raw_vertices[2].y = 0;
raw_vertices[2].z = FLT_MAX;
raw_vertices[3].x = FLT_MAX;
raw_vertices[3].y = 0;
raw_vertices[3].z = -FLT_MAX;
}
else if (_obj->_norm.x == 0 && _obj->_norm.y == 0 && (_obj->_norm.z == 1 || _obj->_norm.z == -1))
{
raw_vertices[0].x = -FLT_MAX;
raw_vertices[0].y = -FLT_MAX;
raw_vertices[0].z = 0;
raw_vertices[1].x = FLT_MAX;
raw_vertices[1].y = FLT_MAX;
raw_vertices[1].z = 0;
raw_vertices[2].x = -FLT_MAX;
raw_vertices[2].y = FLT_MAX;
raw_vertices[2].z = 0;
raw_vertices[3].x = FLT_MAX;
raw_vertices[3].y = -FLT_MAX;
raw_vertices[3].z = 0;
}
else if (_obj->_norm.x == 0 && _obj->_norm.y == 0 && _obj->_norm.z == 0)
{
raw_vertices[0].x = 0;
raw_vertices[0].y = 0;
raw_vertices[0].z = 0;
raw_vertices[1].x = 0;
raw_vertices[1].y = 0;
raw_vertices[1].z = 0;
raw_vertices[2].x = 0;
raw_vertices[2].y = 0;
raw_vertices[2].z = 0;
raw_vertices[3].x = 0;
raw_vertices[3].y = 0;
raw_vertices[3].z = 0;
}
else
{
raw_vertices[0].x = -FLT_MAX;
raw_vertices[0].y = -FLT_MAX;
raw_vertices[0].z = -FLT_MAX;
raw_vertices[1].x = FLT_MAX;
raw_vertices[1].y = FLT_MAX;
raw_vertices[1].z = FLT_MAX;
raw_vertices[2].x = -FLT_MAX;
raw_vertices[2].y = FLT_MAX;
raw_vertices[2].z = FLT_MAX;
raw_vertices[3].x = FLT_MAX;
raw_vertices[3].y = -FLT_MAX;
raw_vertices[3].z = -FLT_MAX;
}
}
Vec3<PREC> Plane::getTextureCoord(PREC const &x,
PREC const &y,
PREC const &z) const
{
return BasePlane::getTextureCoord(_obj, x, y, z);
}
const BaseGeometry *Plane::hitCheck(Ray const &ray,
PREC const &t_start,
PREC const &t_end,
PREC &hit_at) const
{
return BasePlane::hitCheck(_obj, ray, t_start, t_end, hit_at) ? this : nullptr;
}
Direction Plane::normalVec(PREC const &x, PREC const &y,
PREC const &z,
bool &double_face) const
{
return BasePlane::normalVec(_obj, x, y, z, double_face);
}
|
29188d8148e5bf4b024cd2867da36e9b6d8b8ed4.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
// includes, project
#include <hip/hip_runtime.h>
#include <nba/core/errors.hh>
#include <nba/core/accumidx.hh>
#include <nba/engines/cuda/utils.hh>
#include "IPlookup_kernel.hh"
#define IGNORED_IP 0xFFffFFffu
#include <nba/framework/datablock_shared.hh>
extern "C" {
/* The index is given by the order in get_used_datablocks(). */
#define dbid_ipv4_dest_addrs_d (0)
#define dbid_ipv4_lookup_results_d (1)
__device__ static inline uint32_t ntohl(uint32_t n)
{
return ((n & 0xff000000) >> 24) | ((n & 0x00ff0000) >> 8) | \
((n & 0x0000ff00) << 8) | ((n & 0x000000ff) << 24);
}
/* The GPU kernel. */
__global__ void ipv4_route_lookup_cuda(
struct datablock_kernel_arg **datablocks,
uint32_t count, uint32_t *item_counts, uint32_t num_batches,
uint8_t *checkbits_d,
uint16_t* __restrict__ TBL24_d,
uint16_t* __restrict__ TBLlong_d)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < count) {
uint32_t batch_idx, item_idx;
assert(nba::NBA_SUCCESS == nba::get_accum_idx(item_counts, num_batches,
idx, batch_idx, item_idx));
struct datablock_kernel_arg *db_dest_addrs = datablocks[dbid_ipv4_dest_addrs_d];
struct datablock_kernel_arg *db_results = datablocks[dbid_ipv4_lookup_results_d];
uint32_t daddr = ((uint32_t*) db_dest_addrs->batches[batch_idx].buffer_bases)[item_idx];
uint16_t *lookup_result = &((uint16_t *)db_results->batches[batch_idx].buffer_bases)[item_idx];
if (daddr == IGNORED_IP) {
*lookup_result = 0;
} else {
daddr = ntohl(daddr);
uint16_t temp_dest = TBL24_d[daddr >> 8];
if (temp_dest & 0x8000u) {
int index2 = (((uint32_t)(temp_dest & 0x7fff)) << 8) + (daddr & 0xff);
temp_dest = TBLlong_d[index2];
}
*lookup_result = temp_dest;
}
}
__syncthreads();
if (threadIdx.x == 0 && checkbits_d != NULL) {
checkbits_d[blockIdx.x] = 1;
}
}
}
void *nba::ipv4_route_lookup_get_cuda_kernel() {
return reinterpret_cast<void *> (ipv4_route_lookup_cuda);
}
// vim: ts=8 sts=4 sw=4 et
| 29188d8148e5bf4b024cd2867da36e9b6d8b8ed4.cu | #include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
// includes, project
#include <cuda.h>
#include <nba/core/errors.hh>
#include <nba/core/accumidx.hh>
#include <nba/engines/cuda/utils.hh>
#include "IPlookup_kernel.hh"
#define IGNORED_IP 0xFFffFFffu
#include <nba/framework/datablock_shared.hh>
extern "C" {
/* The index is given by the order in get_used_datablocks(). */
#define dbid_ipv4_dest_addrs_d (0)
#define dbid_ipv4_lookup_results_d (1)
__device__ static inline uint32_t ntohl(uint32_t n)
{
return ((n & 0xff000000) >> 24) | ((n & 0x00ff0000) >> 8) | \
((n & 0x0000ff00) << 8) | ((n & 0x000000ff) << 24);
}
/* The GPU kernel. */
__global__ void ipv4_route_lookup_cuda(
struct datablock_kernel_arg **datablocks,
uint32_t count, uint32_t *item_counts, uint32_t num_batches,
uint8_t *checkbits_d,
uint16_t* __restrict__ TBL24_d,
uint16_t* __restrict__ TBLlong_d)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < count) {
uint32_t batch_idx, item_idx;
assert(nba::NBA_SUCCESS == nba::get_accum_idx(item_counts, num_batches,
idx, batch_idx, item_idx));
struct datablock_kernel_arg *db_dest_addrs = datablocks[dbid_ipv4_dest_addrs_d];
struct datablock_kernel_arg *db_results = datablocks[dbid_ipv4_lookup_results_d];
uint32_t daddr = ((uint32_t*) db_dest_addrs->batches[batch_idx].buffer_bases)[item_idx];
uint16_t *lookup_result = &((uint16_t *)db_results->batches[batch_idx].buffer_bases)[item_idx];
if (daddr == IGNORED_IP) {
*lookup_result = 0;
} else {
daddr = ntohl(daddr);
uint16_t temp_dest = TBL24_d[daddr >> 8];
if (temp_dest & 0x8000u) {
int index2 = (((uint32_t)(temp_dest & 0x7fff)) << 8) + (daddr & 0xff);
temp_dest = TBLlong_d[index2];
}
*lookup_result = temp_dest;
}
}
__syncthreads();
if (threadIdx.x == 0 && checkbits_d != NULL) {
checkbits_d[blockIdx.x] = 1;
}
}
}
void *nba::ipv4_route_lookup_get_cuda_kernel() {
return reinterpret_cast<void *> (ipv4_route_lookup_cuda);
}
// vim: ts=8 sts=4 sw=4 et
|
5200792eea5a8a29a96f30c72bfa6b46b0b995d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#define N 8192
#define LINEAR_SIDE_X 32
#define LINEAR_SIDE_Y 16
void print_matrix(int *p){
for(int i = 0;i<N;i++){
for(int j=0;j<N;j++){printf("%d ",p[i*N + j]);}
printf("\n");
}
}
void fill_matrix(int *p){
for(int i = 0; i<N;i++){
for(int j = 0; j<N;j++){p[i*N + j] = rand()%100 + 1;}
}
}
bool verify(int *a,int*b){
for(int i =0;i<N*N;i++){if(a[i]!=b[(i%N)*N + i/N]) return 0;}
return 1;
}
__global__ void transpose(int *mat_in_dev, int *mat_out_dev){
  // shared-memory tile holding the sub-matrix assigned to this block,
  // visible to all threads within the block
__shared__ int temp_matrix[LINEAR_SIDE_X][LINEAR_SIDE_Y];
  // global indexes used to address this thread's element of the input matrix
int global_x = blockIdx.x*blockDim.x + threadIdx.x;
int global_y = blockIdx.y*blockDim.y + threadIdx.y;
  // the same indexes, swapped, give the element's position in the transposed output
  // copy this thread's element into the shared tile
temp_matrix[threadIdx.x][threadIdx.y] = mat_in_dev[global_x*N + global_y];
__syncthreads();
  // write the element back to its transposed position in the output matrix
mat_out_dev[global_y*N + global_x]=temp_matrix[threadIdx.x][threadIdx.y];
}
int main(void){
int *mat_in_h;
int *mat_out_h;
int *mat_in_dev;
int *mat_out_dev;
//allocation of memory
mat_in_h = (int*)malloc(N*N*sizeof(int));
mat_out_h = (int*)malloc(N*N*sizeof(int));
hipMalloc((void**)&mat_in_dev,N*N*sizeof(int));
hipMalloc((void**)&mat_out_dev,N*N*sizeof(int));
fill_matrix(mat_in_h);
//copy matrix from host to device
int size = N*N*sizeof(int);
hipMemcpy(mat_in_dev,mat_in_h,size,hipMemcpyHostToDevice);
// definition of variables dim3
dim3 grid,block;
block.x = LINEAR_SIDE_X;
block.y = LINEAR_SIDE_Y;
grid.x = N/block.x;
grid.y = N/block.y;
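  // note: the integer divisions above assume N is a multiple of LINEAR_SIDE_X and
  // LINEAR_SIDE_Y (true for N = 8192); otherwise the grid would not cover the whole matrix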
// timing
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(( transpose), dim3(grid),dim3(block), 0, 0, mat_in_dev,mat_out_dev);
hipEventRecord(stop);
// copy from device to host
hipMemcpy(mat_out_h,mat_out_dev,size,hipMemcpyDeviceToHost);
printf("%d\n",verify(mat_in_h,mat_out_h));
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("dimensions of block: %d x %d\n",LINEAR_SIDE_X,LINEAR_SIDE_Y);
printf("Time in milliseconds: %f\n",milliseconds);
printf("Bandwidth: %f GB/s\n",2*size/milliseconds/1e6);
//free the memory
free(mat_in_h);
free(mat_out_h);
hipFree(mat_in_dev);
hipFree(mat_out_dev);
return 0;
}
| 5200792eea5a8a29a96f30c72bfa6b46b0b995d5.cu | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#define N 8192
#define LINEAR_SIDE_X 32
#define LINEAR_SIDE_Y 16
void print_matrix(int *p){
for(int i = 0;i<N;i++){
for(int j=0;j<N;j++){printf("%d ",p[i*N + j]);}
printf("\n");
}
}
void fill_matrix(int *p){
for(int i = 0; i<N;i++){
for(int j = 0; j<N;j++){p[i*N + j] = rand()%100 + 1;}
}
}
bool verify(int *a,int*b){
for(int i =0;i<N*N;i++){if(a[i]!=b[(i%N)*N + i/N]) return 0;}
return 1;
}
__global__ void transpose(int *mat_in_dev, int *mat_out_dev){
  // shared-memory tile holding the sub-matrix assigned to this block,
  // visible to all threads within the block
__shared__ int temp_matrix[LINEAR_SIDE_X][LINEAR_SIDE_Y];
  // global indexes used to address this thread's element of the input matrix
int global_x = blockIdx.x*blockDim.x + threadIdx.x;
int global_y = blockIdx.y*blockDim.y + threadIdx.y;
  // the same indexes, swapped, give the element's position in the transposed output
  // copy this thread's element into the shared tile
temp_matrix[threadIdx.x][threadIdx.y] = mat_in_dev[global_x*N + global_y];
__syncthreads();
  // write the element back to its transposed position in the output matrix
mat_out_dev[global_y*N + global_x]=temp_matrix[threadIdx.x][threadIdx.y];
}
int main(void){
int *mat_in_h;
int *mat_out_h;
int *mat_in_dev;
int *mat_out_dev;
//allocation of memory
mat_in_h = (int*)malloc(N*N*sizeof(int));
mat_out_h = (int*)malloc(N*N*sizeof(int));
cudaMalloc((void**)&mat_in_dev,N*N*sizeof(int));
cudaMalloc((void**)&mat_out_dev,N*N*sizeof(int));
fill_matrix(mat_in_h);
//copy matrix from host to device
int size = N*N*sizeof(int);
cudaMemcpy(mat_in_dev,mat_in_h,size,cudaMemcpyHostToDevice);
// definition of variables dim3
dim3 grid,block;
block.x = LINEAR_SIDE_X;
block.y = LINEAR_SIDE_Y;
grid.x = N/block.x;
grid.y = N/block.y;
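  // note: the integer divisions above assume N is a multiple of LINEAR_SIDE_X and
  // LINEAR_SIDE_Y (true for N = 8192); otherwise the grid would not cover the whole matrix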
// timing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
transpose<<<grid,block>>>(mat_in_dev,mat_out_dev);
cudaEventRecord(stop);
// copy from device to host
cudaMemcpy(mat_out_h,mat_out_dev,size,cudaMemcpyDeviceToHost);
printf("%d\n",verify(mat_in_h,mat_out_h));
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("dimensions of block: %d x %d\n",LINEAR_SIDE_X,LINEAR_SIDE_Y);
printf("Time in milliseconds: %f\n",milliseconds);
printf("Bandwidth: %f GB/s\n",2*size/milliseconds/1e6);
//free the memory
free(mat_in_h);
free(mat_out_h);
cudaFree(mat_in_dev);
cudaFree(mat_out_dev);
return 0;
}
|
16fc55171ad9e7af5b8c814fbfa63bad4addc938.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <utilities/error_utils.hpp>
#include "nvstrings/NVStrings.h"
#include "./NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../util.h"
// remove the target characters from the beginning of each string
NVStrings* NVStrings::lstrip( const char* to_strip )
{
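    // two-pass pattern: first measure every output string, then allocate one buffer
    // (createMemoryFor) and write each stripped string at its exclusive-scanned offset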
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
auto execpol = rmm::exec_policy(0);
char* d_strip = nullptr;
if( to_strip )
{
int len = (int)strlen(to_strip) + 1; // include null
d_strip = device_alloc<char>(len,0);
CUDA_TRY( hipMemcpyAsync(d_strip,to_strip,len,hipMemcpyHostToDevice))
}
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
unsigned int len = dstr->lstrip_size(d_strip);
len = ALIGN_SIZE(len);
d_lengths[idx] = (size_t)len;
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
{
if( d_strip )
RMM_FREE(d_strip,0);
return rtn; // all strings are null
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the strip
custring_view** d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
d_results[idx] = dstr->lstrip(d_strip,buffer);
});
//
if( d_strip )
RMM_FREE(d_strip,0);
return rtn;
}
// remove the target characters from the beginning and the end of each string
NVStrings* NVStrings::strip( const char* to_strip )
{
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
auto execpol = rmm::exec_policy(0);
char* d_strip = nullptr;
if( to_strip )
{
int len = (int)strlen(to_strip) + 1; // include null
d_strip = device_alloc<char>(len,0);
CUDA_TRY( hipMemcpyAsync(d_strip,to_strip,len,hipMemcpyHostToDevice))
}
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
unsigned int len = dstr->strip_size(d_strip);
len = ALIGN_SIZE(len);
d_lengths[idx] = (size_t)len;
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
{
if( d_strip )
RMM_FREE(d_strip,0);
return rtn;
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the strip
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
d_results[idx] = dstr->strip(d_strip,buffer);
});
//
if( d_strip )
RMM_FREE(d_strip,0);
return rtn;
}
// remove the target characters from the end of each string
NVStrings* NVStrings::rstrip( const char* to_strip )
{
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
auto execpol = rmm::exec_policy(0);
char* d_strip = nullptr;
if( to_strip )
{
int len = (int)strlen(to_strip) + 1; // include null
d_strip = device_alloc<char>(len,0);
CUDA_TRY( hipMemcpyAsync(d_strip,to_strip,len,hipMemcpyHostToDevice))
}
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
unsigned int len = dstr->rstrip_size(d_strip);
len = ALIGN_SIZE(len);
d_lengths[idx] = (size_t)len;
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
{
if( d_strip )
RMM_FREE(d_strip,0);
return rtn; // all strings are null
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the strip
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
d_results[idx] = dstr->rstrip(d_strip,buffer);
});
//
if( d_strip )
RMM_FREE(d_strip,0);
return rtn;
}
| 16fc55171ad9e7af5b8c814fbfa63bad4addc938.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <utilities/error_utils.hpp>
#include "nvstrings/NVStrings.h"
#include "./NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../util.h"
// remove the target characters from the beginning of each string
NVStrings* NVStrings::lstrip( const char* to_strip )
{
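    // two-pass pattern: first measure every output string, then allocate one buffer
    // (createMemoryFor) and write each stripped string at its exclusive-scanned offset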
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
auto execpol = rmm::exec_policy(0);
char* d_strip = nullptr;
if( to_strip )
{
int len = (int)strlen(to_strip) + 1; // include null
d_strip = device_alloc<char>(len,0);
CUDA_TRY( cudaMemcpyAsync(d_strip,to_strip,len,cudaMemcpyHostToDevice))
}
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
unsigned int len = dstr->lstrip_size(d_strip);
len = ALIGN_SIZE(len);
d_lengths[idx] = (size_t)len;
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
{
if( d_strip )
RMM_FREE(d_strip,0);
return rtn; // all strings are null
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the strip
custring_view** d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
d_results[idx] = dstr->lstrip(d_strip,buffer);
});
//
if( d_strip )
RMM_FREE(d_strip,0);
return rtn;
}
// remove the target characters from the beginning and the end of each string
NVStrings* NVStrings::strip( const char* to_strip )
{
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
auto execpol = rmm::exec_policy(0);
char* d_strip = nullptr;
if( to_strip )
{
int len = (int)strlen(to_strip) + 1; // include null
d_strip = device_alloc<char>(len,0);
CUDA_TRY( cudaMemcpyAsync(d_strip,to_strip,len,cudaMemcpyHostToDevice))
}
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
unsigned int len = dstr->strip_size(d_strip);
len = ALIGN_SIZE(len);
d_lengths[idx] = (size_t)len;
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
{
if( d_strip )
RMM_FREE(d_strip,0);
return rtn;
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the strip
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
d_results[idx] = dstr->strip(d_strip,buffer);
});
//
if( d_strip )
RMM_FREE(d_strip,0);
return rtn;
}
// remove the target characters from the end of each string
NVStrings* NVStrings::rstrip( const char* to_strip )
{
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
auto execpol = rmm::exec_policy(0);
char* d_strip = nullptr;
if( to_strip )
{
int len = (int)strlen(to_strip) + 1; // include null
d_strip = device_alloc<char>(len,0);
CUDA_TRY( cudaMemcpyAsync(d_strip,to_strip,len,cudaMemcpyHostToDevice))
}
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
unsigned int len = dstr->rstrip_size(d_strip);
len = ALIGN_SIZE(len);
d_lengths[idx] = (size_t)len;
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
{
if( d_strip )
RMM_FREE(d_strip,0);
return rtn; // all strings are null
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the strip
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
d_results[idx] = dstr->rstrip(d_strip,buffer);
});
//
if( d_strip )
RMM_FREE(d_strip,0);
return rtn;
}
|
372d3e1dbd8cce30a07748c9098aeac489619922.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <unistd.h>
#include "PreciseTimer.h"
#include "../src/cuMPI_runtime.h"
int myRank; // cuMPI comm local ranks
int nRanks; // total cuMPI comm ranks
int localRank; // CUDA device ID
ncclUniqueId id; // NCCL Unique ID
cuMPI_Comm comm; // cuMPI comm
hipStream_t commStream; // CUDA stream generated for each GPU
cuMPI_Comm defaultComm; // cuMPI comm
hipStream_t defaultCommStream; // CUDA stream generated for each GPU
uint64_t hostHashs[10]; // host name hash in cuMPI
char hostname[1024]; // host name for identification in cuMPI
std::map<cuMPI_Comm, hipStream_t> comm2stream;
// test P2P SendRecv method, using cocurrent overlapping
int main() {
cuMPI_Init(NULL, NULL);
const int count = (1L << 24);
  const long long data_bytes = count * sizeof(float); // 64 MiB
const int max_times = 64;
float *d_send[max_times] = {}, *d_recv[max_times] = {};
for (int i = 0; i < max_times; ++i) {
CUDA_CHECK(hipMalloc(&d_send[i], data_bytes));
CUDA_CHECK(hipMalloc(&d_recv[i], data_bytes));
}
cuMPI_Status status;
int peer = 1 - myRank;
cuMPI_Comm pipe[max_times];
for (int i = 0; i < max_times; ++i) {
cuMPI_NewGlobalComm(&pipe[i]);
}
toth::PreciseTimer timer;
timer.start();
// Added cocurrent overlapping
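  // each exchange uses its own communicator, which (via the comm2stream map above)
  // presumably has a dedicated stream, so the 64 Sendrecv calls can be queued
  // back to back and overlap on the device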
for (int i = 0; i < max_times; ++i) {
cuMPI_CocurrentStart(pipe[i]);
cuMPI_Sendrecv(d_send[i], count, cuMPI_FLOAT, peer, 0, d_recv[i], count, cuMPI_FLOAT, localRank, 0, pipe[i], &status);
cuMPI_CocurrentEnd(pipe[i]);
}
hipDeviceSynchronize();
timer.stop();
double time = timer.milliseconds() / 1000.0;
const int data_mibytes = (data_bytes >> 20);
printf("Send & Recv NCCL tests\n");
printf("Data Size Each Time:\t%12.6f MBytes\n", (double)data_mibytes);
printf("Total Double Exchange:\t%12.6f GBytes\n", (double)(2 * max_times * data_mibytes / 1024));
printf("Performed times count:\t %d\n", max_times);
printf("Total Time cost:\t%12.6f seconds\n", time);
printf("Average Time cost:\t%12.6f seconds\n", time/(double)(max_times));
printf("Average Bus width(S):\t%12.6f GBytes/s\n", (double)(max_times * data_mibytes / 1024)/time);
printf("Average Bus width(D):\t%12.6f GBytes/s\n", (double)(2 * max_times * data_mibytes / 1024)/time);
printf("* S: Single Direction\tD: Double Direction\n\n");
for (int i = 0; i < max_times; ++i) {
CUDA_CHECK(hipFree(d_send[i]));
CUDA_CHECK(hipFree(d_recv[i]));
}
cuMPI_Finalize();
return 0;
}
| 372d3e1dbd8cce30a07748c9098aeac489619922.cu | #include <stdio.h>
#include <unistd.h>
#include "PreciseTimer.h"
#include "../src/cuMPI_runtime.h"
int myRank; // cuMPI comm local ranks
int nRanks; // total cuMPI comm ranks
int localRank; // CUDA device ID
ncclUniqueId id; // NCCL Unique ID
cuMPI_Comm comm; // cuMPI comm
cudaStream_t commStream; // CUDA stream generated for each GPU
cuMPI_Comm defaultComm; // cuMPI comm
cudaStream_t defaultCommStream; // CUDA stream generated for each GPU
uint64_t hostHashs[10]; // host name hash in cuMPI
char hostname[1024]; // host name for identification in cuMPI
std::map<cuMPI_Comm, cudaStream_t> comm2stream;
// test P2P SendRecv method, using cocurrent overlapping
int main() {
cuMPI_Init(NULL, NULL);
const int count = (1L << 24);
  const long long data_bytes = count * sizeof(float); // 64 MiB
const int max_times = 64;
float *d_send[max_times] = {}, *d_recv[max_times] = {};
for (int i = 0; i < max_times; ++i) {
CUDA_CHECK(cudaMalloc(&d_send[i], data_bytes));
CUDA_CHECK(cudaMalloc(&d_recv[i], data_bytes));
}
cuMPI_Status status;
int peer = 1 - myRank;
cuMPI_Comm pipe[max_times];
for (int i = 0; i < max_times; ++i) {
cuMPI_NewGlobalComm(&pipe[i]);
}
toth::PreciseTimer timer;
timer.start();
// Added cocurrent overlapping
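  // each exchange uses its own communicator, which (via the comm2stream map above)
  // presumably has a dedicated stream, so the 64 Sendrecv calls can be queued
  // back to back and overlap on the device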
for (int i = 0; i < max_times; ++i) {
cuMPI_CocurrentStart(pipe[i]);
cuMPI_Sendrecv(d_send[i], count, cuMPI_FLOAT, peer, 0, d_recv[i], count, cuMPI_FLOAT, localRank, 0, pipe[i], &status);
cuMPI_CocurrentEnd(pipe[i]);
}
cudaDeviceSynchronize();
timer.stop();
double time = timer.milliseconds() / 1000.0;
const int data_mibytes = (data_bytes >> 20);
printf("Send & Recv NCCL tests\n");
printf("Data Size Each Time:\t%12.6f MBytes\n", (double)data_mibytes);
printf("Total Double Exchange:\t%12.6f GBytes\n", (double)(2 * max_times * data_mibytes / 1024));
printf("Performed times count:\t %d\n", max_times);
printf("Total Time cost:\t%12.6f seconds\n", time);
printf("Average Time cost:\t%12.6f seconds\n", time/(double)(max_times));
printf("Average Bus width(S):\t%12.6f GBytes/s\n", (double)(max_times * data_mibytes / 1024)/time);
printf("Average Bus width(D):\t%12.6f GBytes/s\n", (double)(2 * max_times * data_mibytes / 1024)/time);
printf("* S: Single Direction\tD: Double Direction\n\n");
for (int i = 0; i < max_times; ++i) {
CUDA_CHECK(cudaFree(d_send[i]));
CUDA_CHECK(cudaFree(d_recv[i]));
}
cuMPI_Finalize();
return 0;
}
|
c6a89eb497e9bc927b493dffdbf58e4f344e7c00.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ float L[10];
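// NOTE: this fragment does not compile on its own -- I (the layer input, apparently
// 512 floats judging from the a=512 comment below) and fc1_b (the fc1 bias) are used
// but not declared here, so they are assumed to be __device__ arrays defined elsewhere;
// the fc1_w parameter is unused in this kernel.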
__global__ void kernel3(float*fc1_w, float* fc2_w){
int a=threadIdx.y*32+threadIdx.x;
if(I[a]+fc1_b[a]>0){
I[a]=I[a]+fc1_b[a];}
else I[a]=0;
//j=10 a=512
for(int j=0;j<10;j++)
atomicAdd(&L[j],I[a]*fc2_w[a*10+j]);
} | c6a89eb497e9bc927b493dffdbf58e4f344e7c00.cu | __device__ float L[10];
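// NOTE: this fragment does not compile on its own -- I (the layer input, apparently
// 512 floats judging from the a=512 comment below) and fc1_b (the fc1 bias) are used
// but not declared here, so they are assumed to be __device__ arrays defined elsewhere;
// the fc1_w parameter is unused in this kernel.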
__global__ void kernel3(float*fc1_w, float* fc2_w){
int a=threadIdx.y*32+threadIdx.x;
if(I[a]+fc1_b[a]>0){
I[a]=I[a]+fc1_b[a];}
else I[a]=0;
//j=10 a=512
for(int j=0;j<10;j++)
atomicAdd(&L[j],I[a]*fc2_w[a*10+j]);
} |
ef1626bd2c8d213a78da9a4786f8e436c34b040e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define wbCheck(stmt) do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while(0)
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
//@@ Insert code to implement vector addition here
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < len )
out[i] = in1[i] + in2[i];
}
#define NB_STREAMS 8
#define BLOCK_SIZE 64
int main(int argc, char ** argv) {
wbArg_t args;
int inputLength;
float * hostInput1;
float * hostInput2;
float * hostOutput;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *) malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
// Create streams
hipStream_t streams[NB_STREAMS];
for ( int ii = 0 ; ii < NB_STREAMS; ++ii)
{
wbCheck(hipStreamCreate(&streams[ii]));
}
int SegSize = (inputLength-1)/NB_STREAMS + 1;
wbLog(TRACE, "The SegSize length is ", SegSize);
// Allocate device buffers
float *d_In1[NB_STREAMS];
float *d_In2[NB_STREAMS];
float *d_Out[NB_STREAMS];
int size = SegSize * sizeof(float);
for ( int ii = 0 ; ii < NB_STREAMS; ++ii)
{
wbCheck(hipMalloc((void **) &d_In1[ii], size));
wbCheck(hipMalloc((void **) &d_In2[ii], size));
wbCheck(hipMalloc((void **) &d_Out[ii], size));
}
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid((SegSize-1)/BLOCK_SIZE + 1);
wbLog(TRACE, "GridDim.x == ", dimGrid.x, " BlockDim.x == ", dimBlock.x);
hipMemcpyAsync(d_In1[0], hostInput1, size, hipMemcpyHostToDevice, streams[0]);
hipMemcpyAsync(d_In2[0], hostInput2, size, hipMemcpyHostToDevice, streams[0]);
hipLaunchKernelGGL(( vecAdd), dim3(dimGrid), dim3(dimBlock), 0, streams[0], d_In1[0], d_In2[0], d_Out[0], SegSize);
hipMemcpyAsync(d_In1[1], hostInput1+SegSize, size, hipMemcpyHostToDevice, streams[1]);
hipMemcpyAsync(d_In2[1], hostInput2+SegSize, size, hipMemcpyHostToDevice, streams[1]);
// Launch work on gpu queue
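    // software pipeline: at step idxStream the D2H copy of segment idxStream-1, the kernel
    // on segment idxStream and the H2D copies of segment idxStream+1 are issued on three
    // different streams; true copy/compute overlap would also need pinned host memory, and
    // the fixed SegSize assumes inputLength is a multiple of NB_STREAMS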
int idxStream=1;
for ( ; idxStream<NB_STREAMS-1; idxStream++)
{
hipMemcpyAsync(hostOutput+((idxStream-1)*SegSize), d_Out[idxStream-1], size, hipMemcpyDeviceToHost, streams[idxStream-1]);
hipLaunchKernelGGL(( vecAdd), dim3(dimGrid), dim3(dimBlock), 0, streams[idxStream], d_In1[idxStream], d_In2[idxStream], d_Out[idxStream], SegSize);
hipMemcpyAsync(d_In1[idxStream+1], hostInput1+((idxStream+1)*SegSize), size, hipMemcpyHostToDevice, streams[idxStream+1]);
hipMemcpyAsync(d_In2[idxStream+1], hostInput2+((idxStream+1)*SegSize), size, hipMemcpyHostToDevice, streams[idxStream+1]);
}
hipMemcpyAsync(hostOutput+((idxStream-1)*SegSize), d_Out[idxStream-1], size, hipMemcpyDeviceToHost, streams[idxStream-1]);
hipLaunchKernelGGL(( vecAdd), dim3(dimGrid), dim3(dimBlock), 0, streams[idxStream], d_In1[idxStream], d_In2[idxStream], d_Out[idxStream], SegSize);
hipMemcpyAsync(hostOutput+(idxStream*SegSize), d_Out[idxStream], size, hipMemcpyDeviceToHost, streams[idxStream]);
// Wait for it
hipDeviceSynchronize();
wbTime_start(GPU, "Freeing GPU Memory");
for ( int ii = 0 ; ii < NB_STREAMS; ++ii)
{
wbCheck(hipFree(d_In1[ii]));
wbCheck(hipFree(d_In2[ii]));
wbCheck(hipFree(d_Out[ii]));
}
wbTime_stop(GPU, "Freeing GPU Memory");
for ( int ii = 0 ; ii < NB_STREAMS; ++ii)
{
wbCheck(hipStreamDestroy(streams[ii]));
}
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
}
| ef1626bd2c8d213a78da9a4786f8e436c34b040e.cu | #include <wb.h>
#define wbCheck(stmt) do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while(0)
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
//@@ Insert code to implement vector addition here
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < len )
out[i] = in1[i] + in2[i];
}
#define NB_STREAMS 8
#define BLOCK_SIZE 64
int main(int argc, char ** argv) {
wbArg_t args;
int inputLength;
float * hostInput1;
float * hostInput2;
float * hostOutput;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *) malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
// Create streams
cudaStream_t streams[NB_STREAMS];
for ( int ii = 0 ; ii < NB_STREAMS; ++ii)
{
wbCheck(cudaStreamCreate(&streams[ii]));
}
int SegSize = (inputLength-1)/NB_STREAMS + 1;
wbLog(TRACE, "The SegSize length is ", SegSize);
// Allocate device buffers
float *d_In1[NB_STREAMS];
float *d_In2[NB_STREAMS];
float *d_Out[NB_STREAMS];
int size = SegSize * sizeof(float);
for ( int ii = 0 ; ii < NB_STREAMS; ++ii)
{
wbCheck(cudaMalloc((void **) &d_In1[ii], size));
wbCheck(cudaMalloc((void **) &d_In2[ii], size));
wbCheck(cudaMalloc((void **) &d_Out[ii], size));
}
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid((SegSize-1)/BLOCK_SIZE + 1);
wbLog(TRACE, "GridDim.x == ", dimGrid.x, " BlockDim.x == ", dimBlock.x);
cudaMemcpyAsync(d_In1[0], hostInput1, size, cudaMemcpyHostToDevice, streams[0]);
cudaMemcpyAsync(d_In2[0], hostInput2, size, cudaMemcpyHostToDevice, streams[0]);
vecAdd<<<dimGrid, dimBlock, 0, streams[0]>>>(d_In1[0], d_In2[0], d_Out[0], SegSize);
cudaMemcpyAsync(d_In1[1], hostInput1+SegSize, size, cudaMemcpyHostToDevice, streams[1]);
cudaMemcpyAsync(d_In2[1], hostInput2+SegSize, size, cudaMemcpyHostToDevice, streams[1]);
// Launch work on gpu queue
int idxStream=1;
for ( ; idxStream<NB_STREAMS-1; idxStream++)
{
cudaMemcpyAsync(hostOutput+((idxStream-1)*SegSize), d_Out[idxStream-1], size, cudaMemcpyDeviceToHost, streams[idxStream-1]);
vecAdd<<<dimGrid, dimBlock, 0, streams[idxStream]>>>(d_In1[idxStream], d_In2[idxStream], d_Out[idxStream], SegSize);
cudaMemcpyAsync(d_In1[idxStream+1], hostInput1+((idxStream+1)*SegSize), size, cudaMemcpyHostToDevice, streams[idxStream+1]);
cudaMemcpyAsync(d_In2[idxStream+1], hostInput2+((idxStream+1)*SegSize), size, cudaMemcpyHostToDevice, streams[idxStream+1]);
}
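// Drain the pipeline: copy back the next-to-last segment, launch the final kernel, then copy back the last segment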
cudaMemcpyAsync(hostOutput+((idxStream-1)*SegSize), d_Out[idxStream-1], size, cudaMemcpyDeviceToHost, streams[idxStream-1]);
vecAdd<<<dimGrid, dimBlock, 0, streams[idxStream]>>>(d_In1[idxStream], d_In2[idxStream], d_Out[idxStream], SegSize);
cudaMemcpyAsync(hostOutput+(idxStream*SegSize), d_Out[idxStream], size, cudaMemcpyDeviceToHost, streams[idxStream]);
// Wait for all outstanding copies and kernels on every stream to finish
cudaDeviceSynchronize();
wbTime_start(GPU, "Freeing GPU Memory");
for ( int ii = 0 ; ii < NB_STREAMS; ++ii)
{
wbCheck(cudaFree(d_In1[ii]));
wbCheck(cudaFree(d_In2[ii]));
wbCheck(cudaFree(d_Out[ii]));
}
wbTime_stop(GPU, "Freeing GPU Memory");
for ( int ii = 0 ; ii < NB_STREAMS; ++ii)
{
wbCheck(cudaStreamDestroy(streams[ii]));
}
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
}
|
8e204af58d9f09c2a544785cc2290c12ab2304c7.hip | // !!! This is a file automatically generated by hipify!!!
/*demo4_gpu_thread.c
*
 * Get two numbers from the input databuffer, compute their sum and write it to the output databuffer.
*/
#ifdef __cplusplus
extern "C"{
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <unistd.h>
#include "hashpipe.h"
#include "demo4_databuf.h"
#include "demo4_gpu_thread.h"
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <time.h>
#include <hip/hip_runtime.h>
int g_iIsDataReadDone = FALSE;
char* g_pc4Data_d = NULL; /* raw data starting address */
char* g_pc4DataRead_d = NULL; /* raw data read pointer */
int g_iNFFT = NFFT;
int g_iNFFT1 = NFFT;
int g_iNFFT2 = NFFT/2;
int g_iNFFT3 = NFFT/4;
int g_iNFFT4 = NFFT/8;
int g_iNFFT5 = NFFT/16;
int g_ISIZE1 = FFTPLAN1_ISIZE;
int g_OSIZE1 = FFTPLAN1_OSIZE;
int g_ISIZE2 = FFTPLAN2_ISIZE;
int g_OSIZE2 = FFTPLAN2_OSIZE;
int g_ISIZE3 = FFTPLAN3_ISIZE;
int g_OSIZE3 = FFTPLAN3_OSIZE;
int g_ISIZE4 = FFTPLAN4_ISIZE;
int g_OSIZE4 = FFTPLAN4_OSIZE;
int g_ISIZE5 = FFTPLAN5_ISIZE;
int g_OSIZE5 = FFTPLAN5_OSIZE;
dim3 g_dimBCopy(1, 1, 1);
dim3 g_dimGCopy(1, 1);
dim3 g_dimBAccum(1, 1, 1);
dim3 g_dimGAccum(1, 1);
int g_BatchAccumThreads;
int g_BatchAccumBlocks;
float* g_pf4FFTIn_d = NULL;
float2* g_pf4FFTOut1_d = NULL;
float2* g_pf4FFTOut2_d = NULL;
float2* g_pf4FFTOut3_d = NULL;
float2* g_pf4FFTOut4_d = NULL;
float2* g_pf4FFTOut5_d = NULL;
hipfftHandle g_stPlan1 = {0};
hipfftHandle g_stPlan2 = {0};
hipfftHandle g_stPlan3 = {0};
hipfftHandle g_stPlan4 = {0};
hipfftHandle g_stPlan5 = {0};
float* g_pf4SumStokes = NULL;
float* g_pf4SumStokes_d = NULL;
float* g_sumBatch1 = NULL;
float* g_sumBatch2 = NULL;
float* g_sumBatch3 = NULL;
float* g_sumBatch4 = NULL;
float* g_sumBatch5 = NULL;
/* BUG: crash if file size is less than 32MB */
int g_iSizeRead = DEF_LEN_IDATA;
static int Init(hashpipe_thread_args_t * args)
{
int iDevCount = 0;
hipDeviceProp_t stDevProp = {0};
int iRet = EXIT_SUCCESS;
hipfftResult iCUFFTRet = HIPFFT_SUCCESS;
int iMaxThreadsPerBlock = 0;
iRet = RegisterSignalHandlers();
if (iRet != EXIT_SUCCESS)
{
(void) fprintf(stderr, "ERROR: Signal-handler registration failed!\n");
return EXIT_FAILURE;
}
/* since CUDASafeCallWithCleanUp() calls hipGetErrorString(),
it should not be used here - will cause crash if no CUDA device is
found */
(void) hipGetDeviceCount(&iDevCount);
if (0 == iDevCount)
{
(void) fprintf(stderr, "ERROR: No CUDA-capable device found!\n");
return EXIT_FAILURE;
}
/* just use the first device */
CUDASafeCallWithCleanUp(hipSetDevice(0));
CUDASafeCallWithCleanUp(hipGetDeviceProperties(&stDevProp, 0));
iMaxThreadsPerBlock = stDevProp.maxThreadsPerBlock;
/* allocate memory for data array - 32MB is the block size for the VEGAS
input buffer */
//CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pc4DataRead_d, g_iSizeRead));
CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pc4Data_d, g_iSizeRead));
g_pc4DataRead_d = g_pc4Data_d;
/* calculate kernel parameters */
if (DEF_LEN_IDATA < iMaxThreadsPerBlock)
{
g_dimBCopy.x = DEF_LEN_IDATA;
g_dimBAccum.x = DEF_LEN_IDATA;
}
else
{
g_dimBCopy.x = iMaxThreadsPerBlock;
g_dimBAccum.x = iMaxThreadsPerBlock;
}
g_dimGCopy.x = (DEF_LEN_IDATA) / iMaxThreadsPerBlock;
g_dimGAccum.x = (DEF_LEN_IDATA) / iMaxThreadsPerBlock;
if (DEF_LEN_ODATA < iMaxThreadsPerBlock){
g_BatchAccumThreads = DEF_LEN_ODATA;
}
else{
g_BatchAccumThreads = iMaxThreadsPerBlock;
}
g_BatchAccumBlocks = DEF_LEN_ODATA/iMaxThreadsPerBlock;
CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4FFTIn_d, DEF_LEN_IDATA * sizeof(float)));
CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4FFTOut1_d, DEF_LEN_IDATA * sizeof(float2)));
CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4FFTOut2_d, DEF_LEN_IDATA * sizeof(float2)));
CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4FFTOut3_d, DEF_LEN_IDATA * sizeof(float2)));
CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4FFTOut4_d, DEF_LEN_IDATA * sizeof(float2)));
CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4FFTOut5_d, DEF_LEN_IDATA * sizeof(float2)));
g_pf4SumStokes = (float *) malloc(DEF_LEN_IDATA * sizeof(float));
if (NULL == g_pf4SumStokes)
{
(void) fprintf(stderr,
"ERROR: Memory allocation failed! %s.\n",
strerror(errno));
return EXIT_FAILURE;
}
CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4SumStokes_d, DEF_LEN_IDATA * sizeof(float)));
CUDASafeCallWithCleanUp(hipMemset(g_pf4SumStokes_d, '\0', DEF_LEN_IDATA * sizeof(float)));
CUDASafeCallWithCleanUp(hipMalloc((void **) &g_sumBatch1, DEF_LEN_ODATA * sizeof(float)));
CUDASafeCallWithCleanUp(hipMemset(g_sumBatch1, '\0', DEF_LEN_ODATA * sizeof(float)));
CUDASafeCallWithCleanUp(hipMalloc((void **) &g_sumBatch2, DEF_LEN_ODATA * sizeof(float)));
CUDASafeCallWithCleanUp(hipMemset(g_sumBatch2, '\0', DEF_LEN_ODATA * sizeof(float)));
CUDASafeCallWithCleanUp(hipMalloc((void **) &g_sumBatch3, DEF_LEN_ODATA * sizeof(float)));
CUDASafeCallWithCleanUp(hipMemset(g_sumBatch3, '\0', DEF_LEN_ODATA * sizeof(float)));
CUDASafeCallWithCleanUp(hipMalloc((void **) &g_sumBatch4, DEF_LEN_ODATA * sizeof(float)));
CUDASafeCallWithCleanUp(hipMemset(g_sumBatch4, '\0', DEF_LEN_ODATA * sizeof(float)));
CUDASafeCallWithCleanUp(hipMalloc((void **) &g_sumBatch5, DEF_LEN_ODATA * sizeof(float)));
CUDASafeCallWithCleanUp(hipMemset(g_sumBatch5, '\0', DEF_LEN_ODATA * sizeof(float)));
/* create plan */
iCUFFTRet = hipfftPlanMany(&g_stPlan1,
FFTPLAN_RANK,
&g_iNFFT1,
&g_ISIZE1,
FFTPLAN1_ISTRIDE,
FFTPLAN1_IDIST,
&g_OSIZE1,
FFTPLAN1_OSTRIDE,
FFTPLAN1_ODIST,
HIPFFT_R2C,
FFTPLAN1_BATCH);
if (iCUFFTRet != HIPFFT_SUCCESS)
{
(void) fprintf(stderr, "ERROR: Plan1 creation failed!\n");
return EXIT_FAILURE;
}
iCUFFTRet = hipfftPlanMany(&g_stPlan2, FFTPLAN_RANK, &g_iNFFT2, &g_ISIZE2, FFTPLAN2_ISTRIDE, FFTPLAN2_IDIST,
&g_OSIZE2, FFTPLAN2_OSTRIDE, FFTPLAN2_ODIST, HIPFFT_R2C, FFTPLAN2_BATCH);
if (iCUFFTRet != HIPFFT_SUCCESS)
{
(void) fprintf(stderr, "ERROR: Plan2 creation failed!\n");
return EXIT_FAILURE;
}
iCUFFTRet = hipfftPlanMany(&g_stPlan3, FFTPLAN_RANK, &g_iNFFT3, &g_ISIZE3, FFTPLAN3_ISTRIDE, FFTPLAN3_IDIST,
&g_OSIZE3, FFTPLAN3_OSTRIDE, FFTPLAN3_ODIST, HIPFFT_R2C, FFTPLAN3_BATCH);
if (iCUFFTRet != HIPFFT_SUCCESS)
{
(void) fprintf(stderr, "ERROR: Plan3 creation failed!\n");
return EXIT_FAILURE;
}
iCUFFTRet = hipfftPlanMany(&g_stPlan4, FFTPLAN_RANK, &g_iNFFT4, &g_ISIZE4, FFTPLAN4_ISTRIDE, FFTPLAN4_IDIST,
&g_OSIZE4, FFTPLAN4_OSTRIDE, FFTPLAN4_ODIST, HIPFFT_R2C, FFTPLAN4_BATCH);
if (iCUFFTRet != HIPFFT_SUCCESS)
{
(void) fprintf(stderr, "ERROR: Plan4 creation failed!\n");
return EXIT_FAILURE;
}
iCUFFTRet = hipfftPlanMany(&g_stPlan5, FFTPLAN_RANK, &g_iNFFT5, &g_ISIZE5, FFTPLAN5_ISTRIDE, FFTPLAN5_IDIST,
&g_OSIZE5, FFTPLAN5_OSTRIDE, FFTPLAN5_ODIST, HIPFFT_R2C, FFTPLAN5_BATCH);
if (iCUFFTRet != HIPFFT_SUCCESS)
{
(void) fprintf(stderr, "ERROR: Plan5 creation failed!\n");
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
/* function that frees resources */
void CleanUp()
{
/* free resources */
if (g_pc4Data_d != NULL)
{
(void) hipFree(g_pc4Data_d);
g_pc4Data_d = NULL;
}
if (g_pf4FFTIn_d != NULL)
{
(void) hipFree(g_pf4FFTIn_d);
g_pf4FFTIn_d = NULL;
}
if (g_pf4FFTOut1_d != NULL)
{
(void) hipFree(g_pf4FFTOut1_d);
g_pf4FFTOut1_d = NULL;
}
if (g_pf4FFTOut2_d != NULL)
{
(void) hipFree(g_pf4FFTOut2_d);
g_pf4FFTOut2_d = NULL;
}
if (g_pf4FFTOut3_d != NULL)
{
(void) hipFree(g_pf4FFTOut3_d);
g_pf4FFTOut3_d = NULL;
}
if (g_pf4FFTOut4_d != NULL)
{
(void) hipFree(g_pf4FFTOut4_d);
g_pf4FFTOut4_d = NULL;
}
if (g_pf4FFTOut5_d != NULL)
{
(void) hipFree(g_pf4FFTOut5_d);
g_pf4FFTOut5_d = NULL;
}
if (g_pf4SumStokes != NULL)
{
free(g_pf4SumStokes);
g_pf4SumStokes = NULL;
}
if (g_pf4SumStokes_d != NULL)
{
(void) hipFree(g_pf4SumStokes_d);
g_pf4SumStokes_d = NULL;
}
if (g_sumBatch2 != NULL)
{
(void) hipFree(g_sumBatch2);
g_sumBatch2 = NULL;
}
if (g_sumBatch1 != NULL)
{
(void) hipFree(g_sumBatch1);
g_sumBatch1 = NULL;
}
if (g_sumBatch3 != NULL)
{
(void) hipFree(g_sumBatch3);
g_sumBatch3 = NULL;
}
if (g_sumBatch4 != NULL)
{
(void) hipFree(g_sumBatch4);
g_sumBatch4 = NULL;
}
if (g_sumBatch5 != NULL)
{
(void) hipFree(g_sumBatch5);
g_sumBatch5 = NULL;
}
/* destroy plan */
/* TODO: check for plan */
(void) hipfftDestroy(g_stPlan1);
(void) hipfftDestroy(g_stPlan2);
(void) hipfftDestroy(g_stPlan3);
(void) hipfftDestroy(g_stPlan4);
(void) hipfftDestroy(g_stPlan5);
/* TODO: check if open */
cpgclos();
return;
}
/*
* Registers handlers for SIGTERM and CTRL+C
*/
int RegisterSignalHandlers()
{
struct sigaction stSigHandler = {{0}};
int iRet = EXIT_SUCCESS;
/* register the CTRL+C-handling function */
stSigHandler.sa_handler = HandleStopSignals;
iRet = sigaction(SIGINT, &stSigHandler, NULL);
if (iRet != EXIT_SUCCESS)
{
(void) fprintf(stderr,
"ERROR: Handler registration failed for signal %d!\n",
SIGINT);
return EXIT_FAILURE;
}
/* register the SIGTERM-handling function */
stSigHandler.sa_handler = HandleStopSignals;
iRet = sigaction(SIGTERM, &stSigHandler, NULL);
if (iRet != EXIT_SUCCESS)
{
(void) fprintf(stderr,
"ERROR: Handler registration failed for signal %d!\n",
SIGTERM);
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
/*
* Catches SIGTERM and CTRL+C and cleans up before exiting
*/
void HandleStopSignals(int iSigNo)
{
/* clean up */
CleanUp();
/* exit */
exit(EXIT_SUCCESS);
/* never reached */
return;
}
void __CUDASafeCallWithCleanUp(hipError_t iRet,
const char* pcFile,
const int iLine,
void (*pCleanUp)(void))
{
if (iRet != hipSuccess)
{
(void) fprintf(stderr,
"ERROR: File <%s>, Line %d: %s\n",
pcFile,
iLine,
hipGetErrorString(iRet));
/* free resources */
(*pCleanUp)();
exit(EXIT_FAILURE);
}
return;
}
/*
* Prints usage information
*/
void PrintUsage(const char *pcProgName)
{
(void) printf("Usage: %s [options] <data-file>\n",
pcProgName);
(void) printf(" -h --help ");
(void) printf("Display this usage information\n");
(void) printf(" -n --nfft <value> ");
(void) printf("Number of points in FFT\n");
(void) printf(" -p --pfb ");
(void) printf("Enable PFB\n");
(void) printf(" -a --nacc <value> ");
(void) printf("Number of spectra to add\n");
(void) printf(" -s --fsamp <value> ");
(void) printf("Sampling frequency\n");
return;
}
static void *run(hashpipe_thread_args_t * args)
{
// Local aliases to shorten access to args fields
demo4_input_databuf_t *db_in = (demo4_input_databuf_t *)args->ibuf;
demo4_output_databuf_t *db_out = (demo4_output_databuf_t *)args->obuf;
hashpipe_status_t st = args->st;
const char * status_key = args->thread_desc->skey;
int rv;
uint64_t mcnt=0;
int curblock_in=0;
int curblock_out=0;
int nhits = 0;
char *data_raw; // raw data that will be fed to the gpu thread
data_raw = (char *)malloc(g_iSizeRead*sizeof(char));
int n_frames = 0; // number of frames that have been processed
int iRet = EXIT_SUCCESS;
int iSpecCount = 0;
int iNumAcc = DEF_ACC;
//if(iNumAcc > g_iSizeRead/DEF_LEN_IDATA){iNumAcc=g_iSizeRead/DEF_LEN_IDATA;} // if the accumulation number is larger than the data buffer, set it to the number of spectra frames in the buffer
int n_spec = 0; // number of spectra
int iProcData = 0;
hipError_t iCUDARet = hipSuccess;
struct timeval stStart = {0};
struct timeval stStop = {0};
const char *pcProgName = NULL;
int iNextOpt = 0;
/* valid short options */
const char* const pcOptsShort = "hb:n:pa:s:";
/* valid long options */
const struct option stOptsLong[] = {
{ "help", 0, NULL, 'h' },
{ "nsub", 1, NULL, 'b' },
{ "nfft", 1, NULL, 'n' },
{ "pfb", 0, NULL, 'p' },
{ "nacc", 1, NULL, 'a' },
{ "fsamp", 1, NULL, 's' },
{ NULL, 0, NULL, 0 }
};
while (run_threads()) {
hashpipe_status_lock_safe(&st);
hputi4(st.buf, "GPUBLKIN", curblock_in);
hputs(st.buf, status_key, "waiting");
hputi4(st.buf, "GPUBKOUT", curblock_out);
hputi8(st.buf,"GPUMCNT",mcnt);
hashpipe_status_unlock_safe(&st);
n_spec = 0;
// Wait for new output block to be free
while ((rv=demo4_output_databuf_wait_free(db_out, curblock_out)) != HASHPIPE_OK) {
if (rv==HASHPIPE_TIMEOUT) {
hashpipe_status_lock_safe(&st);
hputs(st.buf, status_key, "blocked gpu out");
hashpipe_status_unlock_safe(&st);
continue;
} else {
hashpipe_error(__FUNCTION__, "error waiting for free databuf");
pthread_exit(NULL);
break;
}
}
while(iSpecCount < iNumAcc){
// Wait for new input block to be filled
while ((rv=demo4_input_databuf_wait_filled(db_in, curblock_in)) != HASHPIPE_OK) {
if (rv==HASHPIPE_TIMEOUT) {
hashpipe_status_lock_safe(&st);
hputs(st.buf, status_key, "blocked");
hashpipe_status_unlock_safe(&st);
continue;
} else {
hashpipe_error(__FUNCTION__, "error waiting for filled databuf");
pthread_exit(NULL);
break;
}
}
// Note processing status
hashpipe_status_lock_safe(&st);
hputs(st.buf, status_key, "processing gpu");
hashpipe_status_unlock_safe(&st);
//get data from input databuf to local
memcpy(data_raw,db_in->block[curblock_in].data_block,g_iSizeRead*sizeof(char));
// write new data to the gpu buffer
CUDASafeCallWithCleanUp(hipMemcpy(g_pc4Data_d,
data_raw,
g_iSizeRead*sizeof(char),
hipMemcpyHostToDevice));
/* whenever there is a read, reset the read pointer to the beginning */
g_pc4DataRead_d = g_pc4Data_d;
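/* unpack the raw byte samples into the float FFT input buffer */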
hipLaunchKernelGGL(( CopyDataForFFT), dim3(g_dimGCopy), dim3(g_dimBCopy), 0, 0, g_pc4DataRead_d,
g_pf4FFTIn_d);
CUDASafeCallWithCleanUp(hipDeviceSynchronize());
iCUDARet = hipGetLastError();
if (iCUDARet != hipSuccess){
(void) fprintf(stderr,
"ERROR: File <%s>, Line %d: %s\n",
__FILE__,
__LINE__,
hipGetErrorString(iCUDARet));
CleanUp();
}
/* do fft */
iRet = DoFFT();
if (iRet != EXIT_SUCCESS){
(void) fprintf(stderr, "ERROR! FFT failed!\n");
CleanUp();
}
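/* accumulate the five FFT outputs (full-, 1/2-, 1/4-, 1/8- and 1/16-length plans) into the per-plan sum buffers g_sumBatch1..g_sumBatch5 */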
hipLaunchKernelGGL(( BatchAccumulate), dim3(g_BatchAccumBlocks), dim3(g_BatchAccumThreads), 0, 0, g_pf4FFTOut1_d,
g_pf4FFTOut2_d,
g_pf4FFTOut3_d,
g_pf4FFTOut4_d,
g_pf4FFTOut5_d,
g_sumBatch1,
g_sumBatch2,
g_sumBatch3,
g_sumBatch4,
g_sumBatch5,
DEF_LEN_ODATA
);
/*
hipLaunchKernelGGL(( BatchAccumulate), dim3(g_BatchAccumBlocks), dim3(g_BatchAccumThreads), 0, 0, g_pf4FFTOut1_d,
1,
DEF_LEN_ODATA+1,
g_sumBatch1);
hipLaunchKernelGGL(( BatchAccumulate), dim3(g_BatchAccumBlocks), dim3(g_BatchAccumThreads), 0, 0, g_pf4FFTOut2_d,
2,
DEF_LEN_ODATA/2+1,
g_sumBatch2);
hipLaunchKernelGGL(( BatchAccumulate), dim3(g_BatchAccumBlocks), dim3(g_BatchAccumThreads), 0, 0, g_pf4FFTOut3_d,
4,
DEF_LEN_ODATA/4+1,
g_sumBatch3);
hipLaunchKernelGGL(( BatchAccumulate), dim3(g_BatchAccumBlocks), dim3(g_BatchAccumThreads), 0, 0, g_pf4FFTOut4_d,
8,
DEF_LEN_ODATA/8+1,
g_sumBatch4);
hipLaunchKernelGGL(( BatchAccumulate), dim3(g_BatchAccumBlocks), dim3(g_BatchAccumThreads), 0, 0, g_pf4FFTOut5_d,
16,
DEF_LEN_ODATA/16+1,
g_sumBatch5);
*/
CUDASafeCallWithCleanUp(hipDeviceSynchronize());
iCUDARet = hipGetLastError();
if (iCUDARet != hipSuccess)
{
(void) fprintf(stderr,
"ERROR: File <%s>, Line %d: %s\n",
__FILE__,
__LINE__,
hipGetErrorString(iCUDARet));
CleanUp();
}
++iSpecCount;
// Mark input block as free and advance
demo4_input_databuf_set_free(db_in, curblock_in);
curblock_in = (curblock_in + 1) % db_in->header.n_block;
}
//store all spectra untrimmed, concatenated into one output
/*
CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes,
g_sumBatch1,
(DEF_LEN_ODATA
* sizeof(float)),
hipMemcpyDeviceToHost));
CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes + DEF_LEN_ODATA,
g_sumBatch2,
(DEF_LEN_ODATA/2
* sizeof(float)),
hipMemcpyDeviceToHost));
CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes + DEF_LEN_ODATA*3/2,
g_sumBatch3,
(DEF_LEN_ODATA/4
* sizeof(float)),
hipMemcpyDeviceToHost));
CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes + DEF_LEN_ODATA*7/4,
g_sumBatch4,
(DEF_LEN_ODATA/8
* sizeof(float)),
hipMemcpyDeviceToHost));
CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes + DEF_LEN_ODATA*15/8,
g_sumBatch5,
(DEF_LEN_ODATA/16
* sizeof(float)),
hipMemcpyDeviceToHost));
*/
//trimmed spectrum
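// copy DEF_LEN_ODATA/32 bins, starting at offset DEF_LEN_ODATA/32 of each batch sum, and pack the five blocks contiguously into g_pf4SumStokes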
CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes,
g_sumBatch1+DEF_LEN_ODATA/32,
(DEF_LEN_ODATA/32
* sizeof(float)),
hipMemcpyDeviceToHost));
CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes + DEF_LEN_ODATA/32,
g_sumBatch2+DEF_LEN_ODATA/32,
(DEF_LEN_ODATA/32
* sizeof(float)),
hipMemcpyDeviceToHost));
CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes + DEF_LEN_ODATA/16,
g_sumBatch3+DEF_LEN_ODATA/32,
(DEF_LEN_ODATA/32
* sizeof(float)),
hipMemcpyDeviceToHost));
CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes + DEF_LEN_ODATA*3/32,
g_sumBatch4+DEF_LEN_ODATA/32,
(DEF_LEN_ODATA/32
* sizeof(float)),
hipMemcpyDeviceToHost));
CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes + DEF_LEN_ODATA/8,
g_sumBatch5+DEF_LEN_ODATA/32,
(DEF_LEN_ODATA/32
* sizeof(float)),
hipMemcpyDeviceToHost));
memcpy(db_out->block[curblock_out].Stokes_Full+SIZEOF_OUT_STOKES*n_spec,g_pf4SumStokes,SIZEOF_OUT_STOKES*sizeof(float));
//printf("Stokes to output done!\n");
n_spec++;
/* reset time */
iSpecCount = 0;
/* zero accumulators */
CUDASafeCallWithCleanUp(hipMemset(g_pf4SumStokes_d,
'\0',
(DEF_LEN_IDATA
* sizeof(float))));
CUDASafeCallWithCleanUp(hipMemset(g_sumBatch2,
'\0',
(DEF_LEN_ODATA
* sizeof(float))));
CUDASafeCallWithCleanUp(hipMemset(g_sumBatch1,
'\0',
(DEF_LEN_ODATA
* sizeof(float))));
CUDASafeCallWithCleanUp(hipMemset(g_sumBatch3,
'\0',
(DEF_LEN_ODATA
* sizeof(float))));
CUDASafeCallWithCleanUp(hipMemset(g_sumBatch4,
'\0',
(DEF_LEN_ODATA
* sizeof(float))));
CUDASafeCallWithCleanUp(hipMemset(g_sumBatch5,
'\0',
(DEF_LEN_ODATA
* sizeof(float))));
/* if time to read from input buffer */
iProcData = 0;
(void) gettimeofday(&stStop, NULL);
/*(void) printf("Time taken (barring Init()): %gs\n",
((stStop.tv_sec + (stStop.tv_usec * USEC2SEC))
- (stStart.tv_sec + (stStart.tv_usec * USEC2SEC))));*/
//return EXIT_SUCCESS;
//display number of frames in status
hashpipe_status_lock_safe(&st);
hputi4(st.buf,"NFRAMES",n_frames);
hashpipe_status_unlock_safe(&st);
// Mark output block as full and advance
demo4_output_databuf_set_filled(db_out, curblock_out);
curblock_out = (curblock_out + 1) % db_out->header.n_block;
// Mark input block as free and advance
//demo4_input_databuf_set_free(db_in, curblock_in);
//curblock_in = (curblock_in + 1) % db_in->header.n_block;
mcnt++;
/* Check for cancel */
pthread_testcancel();
return EXIT_SUCCESS;
}
CleanUp();
}
static hashpipe_thread_desc_t demo4_gpu_thread = {
name: "demo4_gpu_thread",
skey: "GPUSTAT",
init: Init,
//init: NULL,
run: run,
ibuf_desc: {demo4_input_databuf_create},
obuf_desc: {demo4_output_databuf_create}
};
static __attribute__((constructor)) void ctor()
{
register_hashpipe_thread(&demo4_gpu_thread);
}
#ifdef __cplusplus
}
#endif
| 8e204af58d9f09c2a544785cc2290c12ab2304c7.cu | /*demo4_gpu_thread.c
*
 * Get two numbers from the input databuffer, compute their sum and write it to the output databuffer.
*/
#ifdef __cplusplus
extern "C"{
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <unistd.h>
#include "hashpipe.h"
#include "demo4_databuf.h"
#include "demo4_gpu_thread.h"
#include <cuda.h>
#include <cufft.h>
#include <time.h>
#include <cuda_runtime.h>
int g_iIsDataReadDone = FALSE;
char* g_pc4Data_d = NULL; /* raw data starting address */
char* g_pc4DataRead_d = NULL; /* raw data read pointer */
int g_iNFFT = NFFT;
int g_iNFFT1 = NFFT;
int g_iNFFT2 = NFFT/2;
int g_iNFFT3 = NFFT/4;
int g_iNFFT4 = NFFT/8;
int g_iNFFT5 = NFFT/16;
int g_ISIZE1 = FFTPLAN1_ISIZE;
int g_OSIZE1 = FFTPLAN1_OSIZE;
int g_ISIZE2 = FFTPLAN2_ISIZE;
int g_OSIZE2 = FFTPLAN2_OSIZE;
int g_ISIZE3 = FFTPLAN3_ISIZE;
int g_OSIZE3 = FFTPLAN3_OSIZE;
int g_ISIZE4 = FFTPLAN4_ISIZE;
int g_OSIZE4 = FFTPLAN4_OSIZE;
int g_ISIZE5 = FFTPLAN5_ISIZE;
int g_OSIZE5 = FFTPLAN5_OSIZE;
dim3 g_dimBCopy(1, 1, 1);
dim3 g_dimGCopy(1, 1);
dim3 g_dimBAccum(1, 1, 1);
dim3 g_dimGAccum(1, 1);
int g_BatchAccumThreads;
int g_BatchAccumBlocks;
float* g_pf4FFTIn_d = NULL;
float2* g_pf4FFTOut1_d = NULL;
float2* g_pf4FFTOut2_d = NULL;
float2* g_pf4FFTOut3_d = NULL;
float2* g_pf4FFTOut4_d = NULL;
float2* g_pf4FFTOut5_d = NULL;
cufftHandle g_stPlan1 = {0};
cufftHandle g_stPlan2 = {0};
cufftHandle g_stPlan3 = {0};
cufftHandle g_stPlan4 = {0};
cufftHandle g_stPlan5 = {0};
float* g_pf4SumStokes = NULL;
float* g_pf4SumStokes_d = NULL;
float* g_sumBatch1 = NULL;
float* g_sumBatch2 = NULL;
float* g_sumBatch3 = NULL;
float* g_sumBatch4 = NULL;
float* g_sumBatch5 = NULL;
/* BUG: crash if file size is less than 32MB */
int g_iSizeRead = DEF_LEN_IDATA;
static int Init(hashpipe_thread_args_t * args)
{
int iDevCount = 0;
cudaDeviceProp stDevProp = {0};
int iRet = EXIT_SUCCESS;
cufftResult iCUFFTRet = CUFFT_SUCCESS;
int iMaxThreadsPerBlock = 0;
iRet = RegisterSignalHandlers();
if (iRet != EXIT_SUCCESS)
{
(void) fprintf(stderr, "ERROR: Signal-handler registration failed!\n");
return EXIT_FAILURE;
}
/* since CUDASafeCallWithCleanUp() calls cudaGetErrorString(),
it should not be used here - will cause crash if no CUDA device is
found */
(void) cudaGetDeviceCount(&iDevCount);
if (0 == iDevCount)
{
(void) fprintf(stderr, "ERROR: No CUDA-capable device found!\n");
return EXIT_FAILURE;
}
/* just use the first device */
CUDASafeCallWithCleanUp(cudaSetDevice(0));
CUDASafeCallWithCleanUp(cudaGetDeviceProperties(&stDevProp, 0));
iMaxThreadsPerBlock = stDevProp.maxThreadsPerBlock;
/* allocate memory for data array - 32MB is the block size for the VEGAS
input buffer */
//CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pc4DataRead_d, g_iSizeRead));
CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pc4Data_d, g_iSizeRead));
g_pc4DataRead_d = g_pc4Data_d;
/* calculate kernel parameters */
if (DEF_LEN_IDATA < iMaxThreadsPerBlock)
{
g_dimBCopy.x = DEF_LEN_IDATA;
g_dimBAccum.x = DEF_LEN_IDATA;
}
else
{
g_dimBCopy.x = iMaxThreadsPerBlock;
g_dimBAccum.x = iMaxThreadsPerBlock;
}
g_dimGCopy.x = (DEF_LEN_IDATA) / iMaxThreadsPerBlock;
g_dimGAccum.x = (DEF_LEN_IDATA) / iMaxThreadsPerBlock;
if (DEF_LEN_ODATA < iMaxThreadsPerBlock){
g_BatchAccumThreads = DEF_LEN_ODATA;
}
else{
g_BatchAccumThreads = iMaxThreadsPerBlock;
}
g_BatchAccumBlocks = DEF_LEN_ODATA/iMaxThreadsPerBlock;
CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf4FFTIn_d, DEF_LEN_IDATA * sizeof(float)));
CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf4FFTOut1_d, DEF_LEN_IDATA * sizeof(float2)));
CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf4FFTOut2_d, DEF_LEN_IDATA * sizeof(float2)));
CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf4FFTOut3_d, DEF_LEN_IDATA * sizeof(float2)));
CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf4FFTOut4_d, DEF_LEN_IDATA * sizeof(float2)));
CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf4FFTOut5_d, DEF_LEN_IDATA * sizeof(float2)));
g_pf4SumStokes = (float *) malloc(DEF_LEN_IDATA * sizeof(float));
if (NULL == g_pf4SumStokes)
{
(void) fprintf(stderr,
"ERROR: Memory allocation failed! %s.\n",
strerror(errno));
return EXIT_FAILURE;
}
CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf4SumStokes_d, DEF_LEN_IDATA * sizeof(float)));
CUDASafeCallWithCleanUp(cudaMemset(g_pf4SumStokes_d, '\0', DEF_LEN_IDATA * sizeof(float)));
CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_sumBatch1, DEF_LEN_ODATA * sizeof(float)));
CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch1, '\0', DEF_LEN_ODATA * sizeof(float)));
CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_sumBatch2, DEF_LEN_ODATA * sizeof(float)));
CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch2, '\0', DEF_LEN_ODATA * sizeof(float)));
CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_sumBatch3, DEF_LEN_ODATA * sizeof(float)));
CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch3, '\0', DEF_LEN_ODATA * sizeof(float)));
CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_sumBatch4, DEF_LEN_ODATA * sizeof(float)));
CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch4, '\0', DEF_LEN_ODATA * sizeof(float)));
CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_sumBatch5, DEF_LEN_ODATA * sizeof(float)));
CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch5, '\0', DEF_LEN_ODATA * sizeof(float)));
/* create plan */
iCUFFTRet = cufftPlanMany(&g_stPlan1,
FFTPLAN_RANK,
&g_iNFFT1,
&g_ISIZE1,
FFTPLAN1_ISTRIDE,
FFTPLAN1_IDIST,
&g_OSIZE1,
FFTPLAN1_OSTRIDE,
FFTPLAN1_ODIST,
CUFFT_R2C,
FFTPLAN1_BATCH);
if (iCUFFTRet != CUFFT_SUCCESS)
{
(void) fprintf(stderr, "ERROR: Plan1 creation failed!\n");
return EXIT_FAILURE;
}
iCUFFTRet = cufftPlanMany(&g_stPlan2, FFTPLAN_RANK, &g_iNFFT2, &g_ISIZE2, FFTPLAN2_ISTRIDE, FFTPLAN2_IDIST,
&g_OSIZE2, FFTPLAN2_OSTRIDE, FFTPLAN2_ODIST, CUFFT_R2C, FFTPLAN2_BATCH);
if (iCUFFTRet != CUFFT_SUCCESS)
{
(void) fprintf(stderr, "ERROR: Plan2 creation failed!\n");
return EXIT_FAILURE;
}
iCUFFTRet = cufftPlanMany(&g_stPlan3, FFTPLAN_RANK, &g_iNFFT3, &g_ISIZE3, FFTPLAN3_ISTRIDE, FFTPLAN3_IDIST,
&g_OSIZE3, FFTPLAN3_OSTRIDE, FFTPLAN3_ODIST, CUFFT_R2C, FFTPLAN3_BATCH);
if (iCUFFTRet != CUFFT_SUCCESS)
{
(void) fprintf(stderr, "ERROR: Plan3 creation failed!\n");
return EXIT_FAILURE;
}
iCUFFTRet = cufftPlanMany(&g_stPlan4, FFTPLAN_RANK, &g_iNFFT4, &g_ISIZE4, FFTPLAN4_ISTRIDE, FFTPLAN4_IDIST,
&g_OSIZE4, FFTPLAN4_OSTRIDE, FFTPLAN4_ODIST, CUFFT_R2C, FFTPLAN4_BATCH);
if (iCUFFTRet != CUFFT_SUCCESS)
{
(void) fprintf(stderr, "ERROR: Plan4 creation failed!\n");
return EXIT_FAILURE;
}
iCUFFTRet = cufftPlanMany(&g_stPlan5, FFTPLAN_RANK, &g_iNFFT5, &g_ISIZE5, FFTPLAN5_ISTRIDE, FFTPLAN5_IDIST,
&g_OSIZE5, FFTPLAN5_OSTRIDE, FFTPLAN5_ODIST, CUFFT_R2C, FFTPLAN5_BATCH);
if (iCUFFTRet != CUFFT_SUCCESS)
{
(void) fprintf(stderr, "ERROR: Plan5 creation failed!\n");
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
/* function that frees resources */
void CleanUp()
{
/* free resources */
if (g_pc4Data_d != NULL)
{
(void) cudaFree(g_pc4Data_d);
g_pc4Data_d = NULL;
}
if (g_pf4FFTIn_d != NULL)
{
(void) cudaFree(g_pf4FFTIn_d);
g_pf4FFTIn_d = NULL;
}
if (g_pf4FFTOut1_d != NULL)
{
(void) cudaFree(g_pf4FFTOut1_d);
g_pf4FFTOut1_d = NULL;
}
if (g_pf4FFTOut2_d != NULL)
{
(void) cudaFree(g_pf4FFTOut2_d);
g_pf4FFTOut2_d = NULL;
}
if (g_pf4FFTOut3_d != NULL)
{
(void) cudaFree(g_pf4FFTOut3_d);
g_pf4FFTOut3_d = NULL;
}
if (g_pf4FFTOut4_d != NULL)
{
(void) cudaFree(g_pf4FFTOut4_d);
g_pf4FFTOut4_d = NULL;
}
if (g_pf4FFTOut5_d != NULL)
{
(void) cudaFree(g_pf4FFTOut5_d);
g_pf4FFTOut5_d = NULL;
}
if (g_pf4SumStokes != NULL)
{
free(g_pf4SumStokes);
g_pf4SumStokes = NULL;
}
if (g_pf4SumStokes_d != NULL)
{
(void) cudaFree(g_pf4SumStokes_d);
g_pf4SumStokes_d = NULL;
}
if (g_sumBatch2 != NULL)
{
(void) cudaFree(g_sumBatch2);
g_sumBatch2 = NULL;
}
if (g_sumBatch1 != NULL)
{
(void) cudaFree(g_sumBatch1);
g_sumBatch1 = NULL;
}
if (g_sumBatch3 != NULL)
{
(void) cudaFree(g_sumBatch3);
g_sumBatch3 = NULL;
}
if (g_sumBatch4 != NULL)
{
(void) cudaFree(g_sumBatch4);
g_sumBatch4 = NULL;
}
if (g_sumBatch5 != NULL)
{
(void) cudaFree(g_sumBatch5);
g_sumBatch5 = NULL;
}
/* destroy plan */
/* TODO: check for plan */
(void) cufftDestroy(g_stPlan1);
(void) cufftDestroy(g_stPlan2);
(void) cufftDestroy(g_stPlan3);
(void) cufftDestroy(g_stPlan4);
(void) cufftDestroy(g_stPlan5);
/* TODO: check if open */
cpgclos();
return;
}
/*
* Registers handlers for SIGTERM and CTRL+C
*/
int RegisterSignalHandlers()
{
struct sigaction stSigHandler = {{0}};
int iRet = EXIT_SUCCESS;
/* register the CTRL+C-handling function */
stSigHandler.sa_handler = HandleStopSignals;
iRet = sigaction(SIGINT, &stSigHandler, NULL);
if (iRet != EXIT_SUCCESS)
{
(void) fprintf(stderr,
"ERROR: Handler registration failed for signal %d!\n",
SIGINT);
return EXIT_FAILURE;
}
/* register the SIGTERM-handling function */
stSigHandler.sa_handler = HandleStopSignals;
iRet = sigaction(SIGTERM, &stSigHandler, NULL);
if (iRet != EXIT_SUCCESS)
{
(void) fprintf(stderr,
"ERROR: Handler registration failed for signal %d!\n",
SIGTERM);
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
/*
* Catches SIGTERM and CTRL+C and cleans up before exiting
*/
void HandleStopSignals(int iSigNo)
{
/* clean up */
CleanUp();
/* exit */
exit(EXIT_SUCCESS);
/* never reached */
return;
}
void __CUDASafeCallWithCleanUp(cudaError_t iRet,
const char* pcFile,
const int iLine,
void (*pCleanUp)(void))
{
if (iRet != cudaSuccess)
{
(void) fprintf(stderr,
"ERROR: File <%s>, Line %d: %s\n",
pcFile,
iLine,
cudaGetErrorString(iRet));
/* free resources */
(*pCleanUp)();
exit(EXIT_FAILURE);
}
return;
}
/*
* Prints usage information
*/
void PrintUsage(const char *pcProgName)
{
(void) printf("Usage: %s [options] <data-file>\n",
pcProgName);
(void) printf(" -h --help ");
(void) printf("Display this usage information\n");
(void) printf(" -n --nfft <value> ");
(void) printf("Number of points in FFT\n");
(void) printf(" -p --pfb ");
(void) printf("Enable PFB\n");
(void) printf(" -a --nacc <value> ");
(void) printf("Number of spectra to add\n");
(void) printf(" -s --fsamp <value> ");
(void) printf("Sampling frequency\n");
return;
}
static void *run(hashpipe_thread_args_t * args)
{
// Local aliases to shorten access to args fields
demo4_input_databuf_t *db_in = (demo4_input_databuf_t *)args->ibuf;
demo4_output_databuf_t *db_out = (demo4_output_databuf_t *)args->obuf;
hashpipe_status_t st = args->st;
const char * status_key = args->thread_desc->skey;
int rv;
uint64_t mcnt=0;
int curblock_in=0;
int curblock_out=0;
int nhits = 0;
char *data_raw; // raw data that will be fed to the gpu thread
data_raw = (char *)malloc(g_iSizeRead*sizeof(char));
int n_frames = 0; // number of frames that have been processed
int iRet = EXIT_SUCCESS;
int iSpecCount = 0;
int iNumAcc = DEF_ACC;
//if(iNumAcc > g_iSizeRead/DEF_LEN_IDATA){iNumAcc=g_iSizeRead/DEF_LEN_IDATA;} // if the accumulation number is larger than the data buffer, set it to the number of spectra frames in the buffer
int n_spec = 0; // number of spectra
int iProcData = 0;
cudaError_t iCUDARet = cudaSuccess;
struct timeval stStart = {0};
struct timeval stStop = {0};
const char *pcProgName = NULL;
int iNextOpt = 0;
/* valid short options */
const char* const pcOptsShort = "hb:n:pa:s:";
/* valid long options */
const struct option stOptsLong[] = {
{ "help", 0, NULL, 'h' },
{ "nsub", 1, NULL, 'b' },
{ "nfft", 1, NULL, 'n' },
{ "pfb", 0, NULL, 'p' },
{ "nacc", 1, NULL, 'a' },
{ "fsamp", 1, NULL, 's' },
{ NULL, 0, NULL, 0 }
};
while (run_threads()) {
hashpipe_status_lock_safe(&st);
hputi4(st.buf, "GPUBLKIN", curblock_in);
hputs(st.buf, status_key, "waiting");
hputi4(st.buf, "GPUBKOUT", curblock_out);
hputi8(st.buf,"GPUMCNT",mcnt);
hashpipe_status_unlock_safe(&st);
n_spec = 0;
// Wait for new output block to be free
while ((rv=demo4_output_databuf_wait_free(db_out, curblock_out)) != HASHPIPE_OK) {
if (rv==HASHPIPE_TIMEOUT) {
hashpipe_status_lock_safe(&st);
hputs(st.buf, status_key, "blocked gpu out");
hashpipe_status_unlock_safe(&st);
continue;
} else {
hashpipe_error(__FUNCTION__, "error waiting for free databuf");
pthread_exit(NULL);
break;
}
}
while(iSpecCount < iNumAcc){
// Wait for new input block to be filled
while ((rv=demo4_input_databuf_wait_filled(db_in, curblock_in)) != HASHPIPE_OK) {
if (rv==HASHPIPE_TIMEOUT) {
hashpipe_status_lock_safe(&st);
hputs(st.buf, status_key, "blocked");
hashpipe_status_unlock_safe(&st);
continue;
} else {
hashpipe_error(__FUNCTION__, "error waiting for filled databuf");
pthread_exit(NULL);
break;
}
}
// Note processing status
hashpipe_status_lock_safe(&st);
hputs(st.buf, status_key, "processing gpu");
hashpipe_status_unlock_safe(&st);
//get data from input databuf to local
memcpy(data_raw,db_in->block[curblock_in].data_block,g_iSizeRead*sizeof(char));
// write new data to the gpu buffer
CUDASafeCallWithCleanUp(cudaMemcpy(g_pc4Data_d,
data_raw,
g_iSizeRead*sizeof(char),
cudaMemcpyHostToDevice));
/* whenever there is a read, reset the read pointer to the beginning */
g_pc4DataRead_d = g_pc4Data_d;
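/* unpack the raw byte samples into the float FFT input buffer */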
CopyDataForFFT<<<g_dimGCopy, g_dimBCopy>>>(g_pc4DataRead_d,
g_pf4FFTIn_d);
CUDASafeCallWithCleanUp(cudaThreadSynchronize());
iCUDARet = cudaGetLastError();
if (iCUDARet != cudaSuccess){
(void) fprintf(stderr,
"ERROR: File <%s>, Line %d: %s\n",
__FILE__,
__LINE__,
cudaGetErrorString(iCUDARet));
CleanUp();
}
/* do fft */
iRet = DoFFT();
if (iRet != EXIT_SUCCESS){
(void) fprintf(stderr, "ERROR! FFT failed!\n");
CleanUp();
}
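/* accumulate the five FFT outputs (full-, 1/2-, 1/4-, 1/8- and 1/16-length plans) into the per-plan sum buffers g_sumBatch1..g_sumBatch5 */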
BatchAccumulate<<<g_BatchAccumBlocks, g_BatchAccumThreads>>>(g_pf4FFTOut1_d,
g_pf4FFTOut2_d,
g_pf4FFTOut3_d,
g_pf4FFTOut4_d,
g_pf4FFTOut5_d,
g_sumBatch1,
g_sumBatch2,
g_sumBatch3,
g_sumBatch4,
g_sumBatch5,
DEF_LEN_ODATA
);
/*
BatchAccumulate<<<g_BatchAccumBlocks, g_BatchAccumThreads>>>(g_pf4FFTOut1_d,
1,
DEF_LEN_ODATA+1,
g_sumBatch1);
BatchAccumulate<<<g_BatchAccumBlocks, g_BatchAccumThreads>>>(g_pf4FFTOut2_d,
2,
DEF_LEN_ODATA/2+1,
g_sumBatch2);
BatchAccumulate<<<g_BatchAccumBlocks, g_BatchAccumThreads>>>(g_pf4FFTOut3_d,
4,
DEF_LEN_ODATA/4+1,
g_sumBatch3);
BatchAccumulate<<<g_BatchAccumBlocks, g_BatchAccumThreads>>>(g_pf4FFTOut4_d,
8,
DEF_LEN_ODATA/8+1,
g_sumBatch4);
BatchAccumulate<<<g_BatchAccumBlocks, g_BatchAccumThreads>>>(g_pf4FFTOut5_d,
16,
DEF_LEN_ODATA/16+1,
g_sumBatch5);
*/
CUDASafeCallWithCleanUp(cudaThreadSynchronize());
iCUDARet = cudaGetLastError();
if (iCUDARet != cudaSuccess)
{
(void) fprintf(stderr,
"ERROR: File <%s>, Line %d: %s\n",
__FILE__,
__LINE__,
cudaGetErrorString(iCUDARet));
CleanUp();
}
++iSpecCount;
// Mark input block as free and advance
demo4_input_databuf_set_free(db_in, curblock_in);
curblock_in = (curblock_in + 1) % db_in->header.n_block;
}
//store all spectra untrimmed, concatenated into one output
/*
CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes,
g_sumBatch1,
(DEF_LEN_ODATA
* sizeof(float)),
cudaMemcpyDeviceToHost));
CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes + DEF_LEN_ODATA,
g_sumBatch2,
(DEF_LEN_ODATA/2
* sizeof(float)),
cudaMemcpyDeviceToHost));
CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes + DEF_LEN_ODATA*3/2,
g_sumBatch3,
(DEF_LEN_ODATA/4
* sizeof(float)),
cudaMemcpyDeviceToHost));
CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes + DEF_LEN_ODATA*7/4,
g_sumBatch4,
(DEF_LEN_ODATA/8
* sizeof(float)),
cudaMemcpyDeviceToHost));
CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes + DEF_LEN_ODATA*15/8,
g_sumBatch5,
(DEF_LEN_ODATA/16
* sizeof(float)),
cudaMemcpyDeviceToHost));
*/
//trimmed spectrum
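// copy DEF_LEN_ODATA/32 bins, starting at offset DEF_LEN_ODATA/32 of each batch sum, and pack the five blocks contiguously into g_pf4SumStokes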
CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes,
g_sumBatch1+DEF_LEN_ODATA/32,
(DEF_LEN_ODATA/32
* sizeof(float)),
cudaMemcpyDeviceToHost));
CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes + DEF_LEN_ODATA/32,
g_sumBatch2+DEF_LEN_ODATA/32,
(DEF_LEN_ODATA/32
* sizeof(float)),
cudaMemcpyDeviceToHost));
CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes + DEF_LEN_ODATA/16,
g_sumBatch3+DEF_LEN_ODATA/32,
(DEF_LEN_ODATA/32
* sizeof(float)),
cudaMemcpyDeviceToHost));
CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes + DEF_LEN_ODATA*3/32,
g_sumBatch4+DEF_LEN_ODATA/32,
(DEF_LEN_ODATA/32
* sizeof(float)),
cudaMemcpyDeviceToHost));
CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes + DEF_LEN_ODATA/8,
g_sumBatch5+DEF_LEN_ODATA/32,
(DEF_LEN_ODATA/32
* sizeof(float)),
cudaMemcpyDeviceToHost));
memcpy(db_out->block[curblock_out].Stokes_Full+SIZEOF_OUT_STOKES*n_spec,g_pf4SumStokes,SIZEOF_OUT_STOKES*sizeof(float));
//printf("Stokes to output done!\n");
n_spec++;
/* reset time */
iSpecCount = 0;
/* zero accumulators */
CUDASafeCallWithCleanUp(cudaMemset(g_pf4SumStokes_d,
'\0',
(DEF_LEN_IDATA
* sizeof(float))));
CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch2,
'\0',
(DEF_LEN_ODATA
* sizeof(float))));
CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch1,
'\0',
(DEF_LEN_ODATA
* sizeof(float))));
CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch3,
'\0',
(DEF_LEN_ODATA
* sizeof(float))));
CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch4,
'\0',
(DEF_LEN_ODATA
* sizeof(float))));
CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch5,
'\0',
(DEF_LEN_ODATA
* sizeof(float))));
/* if time to read from input buffer */
iProcData = 0;
(void) gettimeofday(&stStop, NULL);
/*(void) printf("Time taken (barring Init()): %gs\n",
((stStop.tv_sec + (stStop.tv_usec * USEC2SEC))
- (stStart.tv_sec + (stStart.tv_usec * USEC2SEC))));*/
//return EXIT_SUCCESS;
//display number of frames in status
hashpipe_status_lock_safe(&st);
hputi4(st.buf,"NFRAMES",n_frames);
hashpipe_status_unlock_safe(&st);
// Mark output block as full and advance
demo4_output_databuf_set_filled(db_out, curblock_out);
curblock_out = (curblock_out + 1) % db_out->header.n_block;
// Mark input block as free and advance
//demo4_input_databuf_set_free(db_in, curblock_in);
//curblock_in = (curblock_in + 1) % db_in->header.n_block;
mcnt++;
/* Check for cancel */
pthread_testcancel();
return EXIT_SUCCESS;
}
CleanUp();
}
static hashpipe_thread_desc_t demo4_gpu_thread = {
name: "demo4_gpu_thread",
skey: "GPUSTAT",
init: Init,
//init: NULL,
run: run,
ibuf_desc: {demo4_input_databuf_create},
obuf_desc: {demo4_output_databuf_create}
};
static __attribute__((constructor)) void ctor()
{
register_hashpipe_thread(&demo4_gpu_thread);
}
#ifdef __cplusplus
}
#endif
|
780fac260f2c77e4e0efb70c580070df157e5bc3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "generators/image_batch_reader.h"
#include "core/common_cu.h"
#include <opencv2/opencv.hpp>
#include <random>
#include <thread>
__global__
void GrayImageBatchReaderKernel(const int n, const int offset, const bool between_0_and_1, const unsigned char *in, float *out)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) {
out[i + offset] = between_0_and_1 ? (float)in[i] / 255.0f : (((float)in[i] / 255.0f) - 0.5f) * 2;
}
}
__global__
void ColorImageBatchReaderKernel(const int n, const int offset, const bool between_0_and_1, const unsigned char *in, const int width, const int height, float *out)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) {
int channel = i % 3;
int input_pixel = (i - channel) / 3;
int input_col = input_pixel % width;
int input_row = (input_pixel - input_col) / width;
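// undo OpenCV's interleaved BGR layout: reverse the channel order and write planar (CHW) output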
out[(2 - channel) * width * height + input_row * width + input_col + offset] = between_0_and_1 ? (float)in[i] / 255.0f: (((float)in[i] / 255.0f) - 0.5f) * 2;
}
}
ImageBatchReader::ImageBatchReader(deepflow::NodeParam *param) : Node(param) {
LOG_IF(FATAL, param->has_image_batch_reader_param() == false) << "param.has_image_batch_reader_param() == false";
}
void ImageBatchReader::init() {
auto image_batch_reader_param = _param->image_batch_reader_param();
_folder_path = image_batch_reader_param.folder_path();
std::experimental::filesystem::path path(_folder_path);
std::experimental::filesystem::directory_iterator dir(path);
for (auto &p : dir) {
auto path = p.path();
std::string ext = path.extension().string();
if (ext == ".png" || ext == ".jpg" || ext == ".pgm")
_list_of_files.push_back(path);
}
LOG_IF(FATAL, _list_of_files.size() == 0) << "Failed to find .png or .jpg or .pgm image files in the folder " << _folder_path;
_num_total_samples = _list_of_files.size();
_randomize = image_batch_reader_param.randomize();
_between_0_and_1 = image_batch_reader_param.between_0_and_1();
const deepflow::TensorParam &tensorParam = image_batch_reader_param.tensor_param();
switch (tensorParam.dims_size()) {
case 1:
_dims = { 1,tensorParam.dims(0),1,1 };
break;
case 2:
_dims = { tensorParam.dims(0),tensorParam.dims(1),1,1 };
break;
case 3:
_dims = { tensorParam.dims(0), 1, tensorParam.dims(1),tensorParam.dims(2) };
break;
case 4:
_dims = { tensorParam.dims(0),tensorParam.dims(1),tensorParam.dims(2),tensorParam.dims(3) };
break;
default:
LOG(FATAL) << "Unsupported shape.";
}
_batch_size = _dims[0];
_num_batches = _num_total_samples / _batch_size;
_last_batch = (_current_batch == (_num_batches - 1));
_outputs[0]->initValue(_dims);
_indices.resize(_batch_size);
}
void thread_internal_read_image(int img_index, std::string file_name, float * output, int channels, int height, int width, int num_of_blocks, int max_thread_per_block, bool between_0_and_1)
{
cv::Mat img;
if (channels == 1)
img = cv::imread(file_name, 0);
else if (channels == 3)
img = cv::imread(file_name);
else
LOG(FATAL) << "Unsupported channel size.";
LOG_IF(FATAL, img.empty()) << "Image " << file_name << " does not exist.";
LOG_IF(FATAL, img.channels() != channels) << "Provided channel count doesn't match for " << file_name;
LOG_IF(FATAL, img.rows != height) << "Provided height doesn't match for " << file_name;
LOG_IF(FATAL, img.cols != width) << "Provided width doesn't match for " << file_name;
size_t img_size = img.cols * img.rows * img.channels();
unsigned char *d_img;
hipMalloc(&d_img, img_size);
DF_CUDA_CHECK(hipMemcpy(d_img, img.ptr<uchar>(), img_size, hipMemcpyHostToDevice));
if (channels == 1) {
GrayImageBatchReaderKernel << < num_of_blocks, max_thread_per_block >> > (img_size, img_index * img_size, between_0_and_1, d_img, output);
DF_KERNEL_CHECK();
}
else if (channels == 3) {
ColorImageBatchReaderKernel << < num_of_blocks, max_thread_per_block >> > (img_size, img_index * img_size, between_0_and_1, d_img, img.cols, img.rows, output);
DF_KERNEL_CHECK();
}
img.release();
hipFree(d_img);
}
void ImageBatchReader::forward()
{
_last_batch = (_current_batch >= (_num_batches - 1));
if (_last_batch) {
_current_batch = 0;
}
else {
_current_batch++;
}
if (_randomize) {
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dis(0, _num_total_samples - 1);
for (int i = 0; i < _indices.size(); ++i)
_indices[i] = dis(gen);
}
else {
for (int i = _current_batch*_dims[0]; i < (_current_batch + 1)*_dims[0]; i++) {
_indices[i%_dims[0]] = i;
}
}
std::list<std::thread> thread_list;
for (int index = 0; index < _indices.size(); ++index)
{
std::string file_name = _list_of_files[_indices[index]].string();
thread_list.push_back(std::thread(thread_internal_read_image, index, file_name, (float*)_outputs[0]->value()->gpu_data(), _dims[1], _dims[2], _dims[3], numOfBlocks(_dims[1] * _dims[2] * _dims[3]), maxThreadsPerBlock, _between_0_and_1));
}
for (auto &thread : thread_list)
thread.join();
}
bool ImageBatchReader::is_last_batch() {
return _last_batch;
}
std::string ImageBatchReader::to_cpp() const
{
auto image_batch_reader_param = _param->image_batch_reader_param();
auto folder_path = image_batch_reader_param.folder_path();
auto type = image_batch_reader_param.tensor_param().type();
std::string cpp = "auto " + _name + " = df.image_batch_reader(\"" + folder_path + "\", ";
if (type == deepflow::ImageReaderParam_Type_GRAY_ONLY)
cpp += "ImageReaderParam_Type_GRAY_ONLY, ";
else
cpp += "ImageReaderParam_Type_COLOR_IF_AVAILABLE, ";
cpp += (_randomize ? "true" : "false") + std::string(", ");
cpp += "\"" + _name + "\");";
return cpp;
}
| 780fac260f2c77e4e0efb70c580070df157e5bc3.cu | #include "generators/image_batch_reader.h"
#include "core/common_cu.h"
#include <opencv2/opencv.hpp>
#include <random>
#include <thread>
__global__
void GrayImageBatchReaderKernel(const int n, const int offset, const bool between_0_and_1, const unsigned char *in, float *out)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) {
out[i + offset] = between_0_and_1 ? (float)in[i] / 255.0f : (((float)in[i] / 255.0f) - 0.5f) * 2;
}
}
__global__
void ColorImageBatchReaderKernel(const int n, const int offset, const bool between_0_and_1, const unsigned char *in, const int width, const int height, float *out)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) {
int channel = i % 3;
int input_pixel = (i - channel) / 3;
int input_col = input_pixel % width;
int input_row = (input_pixel - input_col) / width;
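// undo OpenCV's interleaved BGR layout: reverse the channel order and write planar (CHW) output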
out[(2 - channel) * width * height + input_row * width + input_col + offset] = between_0_and_1 ? (float)in[i] / 255.0f: (((float)in[i] / 255.0f) - 0.5f) * 2;
}
}
ImageBatchReader::ImageBatchReader(deepflow::NodeParam *param) : Node(param) {
LOG_IF(FATAL, param->has_image_batch_reader_param() == false) << "param.has_image_batch_reader_param() == false";
}
void ImageBatchReader::init() {
auto image_batch_reader_param = _param->image_batch_reader_param();
_folder_path = image_batch_reader_param.folder_path();
std::experimental::filesystem::path path(_folder_path);
std::experimental::filesystem::directory_iterator dir(path);
for (auto &p : dir) {
auto path = p.path();
std::string ext = path.extension().string();
if (ext == ".png" || ext == ".jpg" || ext == ".pgm")
_list_of_files.push_back(path);
}
LOG_IF(FATAL, _list_of_files.size() == 0) << "Failed to find .png or .jpg or .pgm image files in the folder " << _folder_path;
_num_total_samples = _list_of_files.size();
_randomize = image_batch_reader_param.randomize();
_between_0_and_1 = image_batch_reader_param.between_0_and_1();
const deepflow::TensorParam &tensorParam = image_batch_reader_param.tensor_param();
switch (tensorParam.dims_size()) {
case 1:
_dims = { 1,tensorParam.dims(0),1,1 };
break;
case 2:
_dims = { tensorParam.dims(0),tensorParam.dims(1),1,1 };
break;
case 3:
_dims = { tensorParam.dims(0), 1, tensorParam.dims(1),tensorParam.dims(2) };
break;
case 4:
_dims = { tensorParam.dims(0),tensorParam.dims(1),tensorParam.dims(2),tensorParam.dims(3) };
break;
default:
LOG(FATAL) << "Unsupported shape.";
}
_batch_size = _dims[0];
_num_batches = _num_total_samples / _batch_size;
_last_batch = (_current_batch == (_num_batches - 1));
_outputs[0]->initValue(_dims);
_indices.resize(_batch_size);
}
void thread_internal_read_image(int img_index, std::string file_name, float * output, int channels, int height, int width, int num_of_blocks, int max_thread_per_block, bool between_0_and_1)
{
cv::Mat img;
if (channels == 1)
img = cv::imread(file_name, 0);
else if (channels == 3)
img = cv::imread(file_name);
else
LOG(FATAL) << "Unsupported channel size.";
LOG_IF(FATAL, img.empty()) << "Image " << file_name << " does not exist.";
LOG_IF(FATAL, img.channels() != channels) << "Provided channel count doesn't match for " << file_name;
LOG_IF(FATAL, img.rows != height) << "Provided height doesn't match for " << file_name;
LOG_IF(FATAL, img.cols != width) << "Provided width doesn't match for " << file_name;
size_t img_size = img.cols * img.rows * img.channels();
unsigned char *d_img;
cudaMalloc(&d_img, img_size);
DF_CUDA_CHECK(cudaMemcpy(d_img, img.ptr<uchar>(), img_size, cudaMemcpyHostToDevice));
if (channels == 1) {
GrayImageBatchReaderKernel << < num_of_blocks, max_thread_per_block >> > (img_size, img_index * img_size, between_0_and_1, d_img, output);
DF_KERNEL_CHECK();
}
else if (channels == 3) {
ColorImageBatchReaderKernel << < num_of_blocks, max_thread_per_block >> > (img_size, img_index * img_size, between_0_and_1, d_img, img.cols, img.rows, output);
DF_KERNEL_CHECK();
}
img.release();
cudaFree(d_img);
}
void ImageBatchReader::forward()
{
_last_batch = (_current_batch >= (_num_batches - 1));
if (_last_batch) {
_current_batch = 0;
}
else {
_current_batch++;
}
if (_randomize) {
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dis(0, _num_total_samples - 1);
for (int i = 0; i < _indices.size(); ++i)
_indices[i] = dis(gen);
}
else {
for (int i = _current_batch*_dims[0]; i < (_current_batch + 1)*_dims[0]; i++) {
_indices[i%_dims[0]] = i;
}
}
std::list<std::thread> thread_list;
for (int index = 0; index < _indices.size(); ++index)
{
std::string file_name = _list_of_files[_indices[index]].string();
thread_list.push_back(std::thread(thread_internal_read_image, index, file_name, (float*)_outputs[0]->value()->gpu_data(), _dims[1], _dims[2], _dims[3], numOfBlocks(_dims[1] * _dims[2] * _dims[3]), maxThreadsPerBlock, _between_0_and_1));
}
for (auto &thread : thread_list)
thread.join();
}
bool ImageBatchReader::is_last_batch() {
return _last_batch;
}
std::string ImageBatchReader::to_cpp() const
{
auto image_batch_reader_param = _param->image_batch_reader_param();
auto folder_path = image_batch_reader_param.folder_path();
auto type = image_batch_reader_param.tensor_param().type();
std::string cpp = "auto " + _name + " = df.image_batch_reader(\"" + folder_path + "\", ";
if (type == deepflow::ImageReaderParam_Type_GRAY_ONLY)
cpp += "ImageReaderParam_Type_GRAY_ONLY, ";
else
cpp += "ImageReaderParam_Type_COLOR_IF_AVAILABLE, ";
cpp += (_randomize ? "true" : "false") + std::string(", ");
cpp += "\"" + _name + "\");";
return cpp;
}
|
c01a625fabfff174f533062e1f575a5dc46003ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "DTUpdateKernel.cuh"
void UpdateDTKernel(DTChunk &argDT,Document &argDoc) {
unsigned int* deviceCounter;
hipMalloc(&deviceCounter, sizeof(unsigned int));
hipMemset(deviceCounter, 0, sizeof(unsigned int));
DT_Update_Kernel << <GridDim, BlockDim >> > (argDoc.deviceMapWord2Doc, argDoc.deviceTLTopic, argDT.deviceNZDTCount, argDT.deviceDTIndex, argDT.deviceDTValue, argDoc.deviceTLDocCount, argDoc.deviceTLDocOffset, argDT.deviceDTCount, argDT.deviceDTOffset, deviceCounter, argDT.docLengthVec[argDT.chunkId], argDoc.d_dense);
H_ERR(hipDeviceSynchronize());
}
| c01a625fabfff174f533062e1f575a5dc46003ed.cu |
#include "DTUpdateKernel.cuh"
void UpdateDTKernel(DTChunk &argDT,Document &argDoc) {
unsigned int* deviceCounter;
cudaMalloc(&deviceCounter, sizeof(unsigned int));
cudaMemset(deviceCounter, 0, sizeof(unsigned int));
DT_Update_Kernel << <GridDim, BlockDim >> > (argDoc.deviceMapWord2Doc, argDoc.deviceTLTopic, argDT.deviceNZDTCount, argDT.deviceDTIndex, argDT.deviceDTValue, argDoc.deviceTLDocCount, argDoc.deviceTLDocOffset, argDT.deviceDTCount, argDT.deviceDTOffset, deviceCounter, argDT.docLengthVec[argDT.chunkId], argDoc.d_dense);
H_ERR(cudaDeviceSynchronize());
}
|
2e0f08319306a26aa0f292172a313f68869408cd.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2021 Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* \file graph/transform/cuda/cuda_compact_graph.cu
* \brief Functions to find and eliminate the common isolated nodes across
* all given graphs with the same set of nodes.
*/
#include <dgl/runtime/device_api.h>
#include <dgl/immutable_graph.h>
#include <hip/hip_runtime.h>
#include <utility>
#include <algorithm>
#include <memory>
#include "../../../runtime/cuda/cuda_common.h"
#include "../../heterograph.h"
#include "../compact.h"
#include "cuda_map_edges.cuh"
using namespace dgl::aten;
using namespace dgl::runtime::cuda;
using namespace dgl::transform::cuda;
namespace dgl {
namespace transform {
namespace {
/**
* \brief This function builds node maps for each node type, preserving the
* order of the input nodes. Here it is assumed the nodes are not unique,
* and thus a unique list is generated.
*
* \param input_nodes The set of input nodes.
* \param node_maps The node maps to be constructed.
* \param count_unique_device The number of unique nodes (on the GPU).
* \param unique_nodes_device The unique nodes (on the GPU).
* \param stream The stream to operate on.
*/
template<typename IdType>
void BuildNodeMaps(
const std::vector<IdArray>& input_nodes,
DeviceNodeMap<IdType> * const node_maps,
int64_t * const count_unique_device,
std::vector<IdArray>* const unique_nodes_device,
hipStream_t stream) {
const int64_t num_ntypes = static_cast<int64_t>(input_nodes.size());
CUDA_CALL(hipMemsetAsync(
count_unique_device,
0,
num_ntypes*sizeof(*count_unique_device),
stream));
// possibly duplicated nodes
for (int64_t ntype = 0; ntype < num_ntypes; ++ntype) {
const IdArray& nodes = input_nodes[ntype];
if (nodes->shape[0] > 0) {
CHECK_EQ(nodes->ctx.device_type, kDLGPU);
node_maps->LhsHashTable(ntype).FillWithDuplicates(
nodes.Ptr<IdType>(),
nodes->shape[0],
(*unique_nodes_device)[ntype].Ptr<IdType>(),
count_unique_device+ntype,
stream);
}
}
}
template<typename IdType>
std::pair<std::vector<HeteroGraphPtr>, std::vector<IdArray>>
CompactGraphsGPU(
const std::vector<HeteroGraphPtr> &graphs,
const std::vector<IdArray> &always_preserve) {
hipStream_t stream = 0;
const auto& ctx = graphs[0]->Context();
auto device = runtime::DeviceAPI::Get(ctx);
CHECK_EQ(ctx.device_type, kDLGPU);
// Step 1: Collect the nodes that has connections for each type.
const int64_t num_ntypes = graphs[0]->NumVertexTypes();
std::vector<std::vector<EdgeArray>> all_edges(graphs.size()); // all_edges[i][etype]
// count the number of nodes per type
std::vector<int64_t> max_vertex_cnt(num_ntypes, 0);
for (size_t i = 0; i < graphs.size(); ++i) {
const HeteroGraphPtr curr_graph = graphs[i];
const int64_t num_etypes = curr_graph->NumEdgeTypes();
for (IdType etype = 0; etype < num_etypes; ++etype) {
IdType srctype, dsttype;
std::tie(srctype, dsttype) = curr_graph->GetEndpointTypes(etype);
const int64_t n_edges = curr_graph->NumEdges(etype);
max_vertex_cnt[srctype] += n_edges;
max_vertex_cnt[dsttype] += n_edges;
}
}
for (size_t i = 0; i < always_preserve.size(); ++i) {
max_vertex_cnt[i] += always_preserve[i]->shape[0];
}
// gather all nodes
std::vector<IdArray> all_nodes(num_ntypes);
std::vector<int64_t> node_offsets(num_ntypes, 0);
for (int64_t ntype = 0; ntype < num_ntypes; ++ntype) {
all_nodes[ntype] = NewIdArray(max_vertex_cnt[ntype], ctx,
sizeof(IdType)*8);
// copy the nodes in always_preserve
if (ntype < always_preserve.size() && always_preserve[ntype]->shape[0] > 0) {
device->CopyDataFromTo(
always_preserve[ntype].Ptr<IdType>(), 0,
all_nodes[ntype].Ptr<IdType>(),
node_offsets[ntype],
sizeof(IdType)*always_preserve[ntype]->shape[0],
always_preserve[ntype]->ctx,
all_nodes[ntype]->ctx,
always_preserve[ntype]->dtype,
stream);
node_offsets[ntype] += sizeof(IdType)*always_preserve[ntype]->shape[0];
}
}
for (size_t i = 0; i < graphs.size(); ++i) {
const HeteroGraphPtr curr_graph = graphs[i];
const int64_t num_etypes = curr_graph->NumEdgeTypes();
all_edges[i].reserve(num_etypes);
for (int64_t etype = 0; etype < num_etypes; ++etype) {
dgl_type_t srctype, dsttype;
std::tie(srctype, dsttype) = curr_graph->GetEndpointTypes(etype);
const EdgeArray edges = curr_graph->Edges(etype, "eid");
if (edges.src.defined()) {
device->CopyDataFromTo(
edges.src.Ptr<IdType>(), 0,
all_nodes[srctype].Ptr<IdType>(),
node_offsets[srctype],
sizeof(IdType)*edges.src->shape[0],
edges.src->ctx,
all_nodes[srctype]->ctx,
edges.src->dtype,
stream);
node_offsets[srctype] += sizeof(IdType)*edges.src->shape[0];
}
if (edges.dst.defined()) {
device->CopyDataFromTo(
edges.dst.Ptr<IdType>(), 0,
all_nodes[dsttype].Ptr<IdType>(),
node_offsets[dsttype],
sizeof(IdType)*edges.dst->shape[0],
edges.dst->ctx,
all_nodes[dsttype]->ctx,
edges.dst->dtype,
stream);
node_offsets[dsttype] += sizeof(IdType)*edges.dst->shape[0];
}
all_edges[i].push_back(edges);
}
}
// Step 2: Relabel the nodes for each type to a smaller ID space
// using BuildNodeMaps
// allocate space for map creation
// the hashmap on GPU
DeviceNodeMap<IdType> node_maps(max_vertex_cnt, 0, ctx, stream);
// number of unique nodes per type on CPU
std::vector<int64_t> num_induced_nodes(num_ntypes);
// number of unique nodes per type on GPU
int64_t * count_unique_device = static_cast<int64_t*>(
device->AllocWorkspace(ctx, sizeof(int64_t)*num_ntypes));
// the set of unique nodes per type
std::vector<IdArray> induced_nodes(num_ntypes);
for (int64_t ntype = 0; ntype < num_ntypes; ++ntype) {
induced_nodes[ntype] = NewIdArray(max_vertex_cnt[ntype], ctx,
sizeof(IdType)*8);
}
BuildNodeMaps(
all_nodes,
&node_maps,
count_unique_device,
&induced_nodes,
stream);
device->CopyDataFromTo(
count_unique_device, 0,
num_induced_nodes.data(), 0,
sizeof(*num_induced_nodes.data())*num_ntypes,
ctx,
DGLContext{kDLCPU, 0},
DGLType{kDLInt, 64, 1},
stream);
device->StreamSync(ctx, stream);
// wait for the node counts to finish transferring
device->FreeWorkspace(ctx, count_unique_device);
// resize induced nodes
for (int64_t ntype = 0; ntype < num_ntypes; ++ntype) {
induced_nodes[ntype]->shape[0] = num_induced_nodes[ntype];
}
// Step 3: Remap the edges of each graph using MapEdges
std::vector<HeteroGraphPtr> new_graphs;
for (size_t i = 0; i < graphs.size(); ++i) {
const HeteroGraphPtr curr_graph = graphs[i];
const auto meta_graph = curr_graph->meta_graph();
const int64_t num_etypes = curr_graph->NumEdgeTypes();
std::vector<HeteroGraphPtr> rel_graphs;
rel_graphs.reserve(num_etypes);
std::vector<IdArray> new_src;
std::vector<IdArray> new_dst;
std::tie(new_src, new_dst) = MapEdges(
curr_graph, all_edges[i], node_maps, stream);
for (IdType etype = 0; etype < num_etypes; ++etype) {
IdType srctype, dsttype;
std::tie(srctype, dsttype) = curr_graph->GetEndpointTypes(etype);
rel_graphs.push_back(UnitGraph::CreateFromCOO(
srctype == dsttype ? 1 : 2,
induced_nodes[srctype]->shape[0],
induced_nodes[dsttype]->shape[0],
new_src[etype],
new_dst[etype]));
}
new_graphs.push_back(CreateHeteroGraph(meta_graph, rel_graphs, num_induced_nodes));
}
return std::make_pair(new_graphs, induced_nodes);
}
} // namespace
template<>
std::pair<std::vector<HeteroGraphPtr>, std::vector<IdArray>>
CompactGraphs<kDLGPU, int32_t>(
const std::vector<HeteroGraphPtr> &graphs,
const std::vector<IdArray> &always_preserve) {
return CompactGraphsGPU<int32_t>(graphs, always_preserve);
}
template<>
std::pair<std::vector<HeteroGraphPtr>, std::vector<IdArray>>
CompactGraphs<kDLGPU, int64_t>(
const std::vector<HeteroGraphPtr> &graphs,
const std::vector<IdArray> &always_preserve) {
return CompactGraphsGPU<int64_t>(graphs, always_preserve);
}
} // namespace transform
} // namespace dgl
| 2e0f08319306a26aa0f292172a313f68869408cd.cu | /*!
* Copyright 2021 Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* \file graph/transform/cuda/cuda_compact_graph.cu
* \brief Functions to find and eliminate the common isolated nodes across
* all given graphs with the same set of nodes.
*/
#include <dgl/runtime/device_api.h>
#include <dgl/immutable_graph.h>
#include <cuda_runtime.h>
#include <utility>
#include <algorithm>
#include <memory>
#include "../../../runtime/cuda/cuda_common.h"
#include "../../heterograph.h"
#include "../compact.h"
#include "cuda_map_edges.cuh"
using namespace dgl::aten;
using namespace dgl::runtime::cuda;
using namespace dgl::transform::cuda;
namespace dgl {
namespace transform {
namespace {
/**
* \brief This function builds node maps for each node type, preserving the
* order of the input nodes. Here it is assumed the nodes are not unique,
* and thus a unique list is generated.
*
* \param input_nodes The set of input nodes.
* \param node_maps The node maps to be constructed.
* \param count_unique_device The number of unique nodes (on the GPU).
* \param unique_nodes_device The unique nodes (on the GPU).
* \param stream The stream to operate on.
*/
template<typename IdType>
void BuildNodeMaps(
const std::vector<IdArray>& input_nodes,
DeviceNodeMap<IdType> * const node_maps,
int64_t * const count_unique_device,
std::vector<IdArray>* const unique_nodes_device,
cudaStream_t stream) {
const int64_t num_ntypes = static_cast<int64_t>(input_nodes.size());
CUDA_CALL(cudaMemsetAsync(
count_unique_device,
0,
num_ntypes*sizeof(*count_unique_device),
stream));
// possibly duplicated nodes
for (int64_t ntype = 0; ntype < num_ntypes; ++ntype) {
const IdArray& nodes = input_nodes[ntype];
if (nodes->shape[0] > 0) {
CHECK_EQ(nodes->ctx.device_type, kDLGPU);
node_maps->LhsHashTable(ntype).FillWithDuplicates(
nodes.Ptr<IdType>(),
nodes->shape[0],
(*unique_nodes_device)[ntype].Ptr<IdType>(),
count_unique_device+ntype,
stream);
}
}
}
template<typename IdType>
std::pair<std::vector<HeteroGraphPtr>, std::vector<IdArray>>
CompactGraphsGPU(
const std::vector<HeteroGraphPtr> &graphs,
const std::vector<IdArray> &always_preserve) {
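// Overview of the GPU compaction:
// 1. gather every edge endpoint (plus the always_preserve nodes) per node type,
// 2. hash them into per-type node maps to obtain a compact relabeling,
// 3. remap each graph's edges and rebuild the relation graphs on that ID space.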
cudaStream_t stream = 0;
const auto& ctx = graphs[0]->Context();
auto device = runtime::DeviceAPI::Get(ctx);
CHECK_EQ(ctx.device_type, kDLGPU);
// Step 1: Collect the nodes that have connections for each type.
const int64_t num_ntypes = graphs[0]->NumVertexTypes();
std::vector<std::vector<EdgeArray>> all_edges(graphs.size()); // all_edges[i][etype]
// count the number of nodes per type
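// (over-allocated upper bound: every edge contributes both of its endpoints and
// every always_preserve entry is counted once; duplicates are removed later by
// the per-type hash table, and induced_nodes is resized to the true unique count)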
std::vector<int64_t> max_vertex_cnt(num_ntypes, 0);
for (size_t i = 0; i < graphs.size(); ++i) {
const HeteroGraphPtr curr_graph = graphs[i];
const int64_t num_etypes = curr_graph->NumEdgeTypes();
for (IdType etype = 0; etype < num_etypes; ++etype) {
IdType srctype, dsttype;
std::tie(srctype, dsttype) = curr_graph->GetEndpointTypes(etype);
const int64_t n_edges = curr_graph->NumEdges(etype);
max_vertex_cnt[srctype] += n_edges;
max_vertex_cnt[dsttype] += n_edges;
}
}
for (size_t i = 0; i < always_preserve.size(); ++i) {
max_vertex_cnt[i] += always_preserve[i]->shape[0];
}
// gather all nodes
std::vector<IdArray> all_nodes(num_ntypes);
std::vector<int64_t> node_offsets(num_ntypes, 0);
for (int64_t ntype = 0; ntype < num_ntypes; ++ntype) {
all_nodes[ntype] = NewIdArray(max_vertex_cnt[ntype], ctx,
sizeof(IdType)*8);
// copy the nodes in always_preserve
if (ntype < always_preserve.size() && always_preserve[ntype]->shape[0] > 0) {
device->CopyDataFromTo(
always_preserve[ntype].Ptr<IdType>(), 0,
all_nodes[ntype].Ptr<IdType>(),
node_offsets[ntype],
sizeof(IdType)*always_preserve[ntype]->shape[0],
always_preserve[ntype]->ctx,
all_nodes[ntype]->ctx,
always_preserve[ntype]->dtype,
stream);
node_offsets[ntype] += sizeof(IdType)*always_preserve[ntype]->shape[0];
}
}
for (size_t i = 0; i < graphs.size(); ++i) {
const HeteroGraphPtr curr_graph = graphs[i];
const int64_t num_etypes = curr_graph->NumEdgeTypes();
all_edges[i].reserve(num_etypes);
for (int64_t etype = 0; etype < num_etypes; ++etype) {
dgl_type_t srctype, dsttype;
std::tie(srctype, dsttype) = curr_graph->GetEndpointTypes(etype);
const EdgeArray edges = curr_graph->Edges(etype, "eid");
if (edges.src.defined()) {
device->CopyDataFromTo(
edges.src.Ptr<IdType>(), 0,
all_nodes[srctype].Ptr<IdType>(),
node_offsets[srctype],
sizeof(IdType)*edges.src->shape[0],
edges.src->ctx,
all_nodes[srctype]->ctx,
edges.src->dtype,
stream);
node_offsets[srctype] += sizeof(IdType)*edges.src->shape[0];
}
if (edges.dst.defined()) {
device->CopyDataFromTo(
edges.dst.Ptr<IdType>(), 0,
all_nodes[dsttype].Ptr<IdType>(),
node_offsets[dsttype],
sizeof(IdType)*edges.dst->shape[0],
edges.dst->ctx,
all_nodes[dsttype]->ctx,
edges.dst->dtype,
stream);
node_offsets[dsttype] += sizeof(IdType)*edges.dst->shape[0];
}
all_edges[i].push_back(edges);
}
}
// Step 2: Relabel the nodes for each type to a smaller ID space
// using BuildNodeMaps
// allocate space for map creation
// the hashmap on GPU
DeviceNodeMap<IdType> node_maps(max_vertex_cnt, 0, ctx, stream);
// number of unique nodes per type on CPU
std::vector<int64_t> num_induced_nodes(num_ntypes);
// number of unique nodes per type on GPU
int64_t * count_unique_device = static_cast<int64_t*>(
device->AllocWorkspace(ctx, sizeof(int64_t)*num_ntypes));
// the set of unique nodes per type
std::vector<IdArray> induced_nodes(num_ntypes);
for (int64_t ntype = 0; ntype < num_ntypes; ++ntype) {
induced_nodes[ntype] = NewIdArray(max_vertex_cnt[ntype], ctx,
sizeof(IdType)*8);
}
BuildNodeMaps(
all_nodes,
&node_maps,
count_unique_device,
&induced_nodes,
stream);
device->CopyDataFromTo(
count_unique_device, 0,
num_induced_nodes.data(), 0,
sizeof(*num_induced_nodes.data())*num_ntypes,
ctx,
DGLContext{kDLCPU, 0},
DGLType{kDLInt, 64, 1},
stream);
device->StreamSync(ctx, stream);
// wait for the node counts to finish transferring
device->FreeWorkspace(ctx, count_unique_device);
// resize induced nodes
for (int64_t ntype = 0; ntype < num_ntypes; ++ntype) {
induced_nodes[ntype]->shape[0] = num_induced_nodes[ntype];
}
// Step 3: Remap the edges of each graph using MapEdges
std::vector<HeteroGraphPtr> new_graphs;
for (size_t i = 0; i < graphs.size(); ++i) {
const HeteroGraphPtr curr_graph = graphs[i];
const auto meta_graph = curr_graph->meta_graph();
const int64_t num_etypes = curr_graph->NumEdgeTypes();
std::vector<HeteroGraphPtr> rel_graphs;
rel_graphs.reserve(num_etypes);
std::vector<IdArray> new_src;
std::vector<IdArray> new_dst;
std::tie(new_src, new_dst) = MapEdges(
curr_graph, all_edges[i], node_maps, stream);
for (IdType etype = 0; etype < num_etypes; ++etype) {
IdType srctype, dsttype;
std::tie(srctype, dsttype) = curr_graph->GetEndpointTypes(etype);
rel_graphs.push_back(UnitGraph::CreateFromCOO(
srctype == dsttype ? 1 : 2,
induced_nodes[srctype]->shape[0],
induced_nodes[dsttype]->shape[0],
new_src[etype],
new_dst[etype]));
}
new_graphs.push_back(CreateHeteroGraph(meta_graph, rel_graphs, num_induced_nodes));
}
return std::make_pair(new_graphs, induced_nodes);
}
} // namespace
template<>
std::pair<std::vector<HeteroGraphPtr>, std::vector<IdArray>>
CompactGraphs<kDLGPU, int32_t>(
const std::vector<HeteroGraphPtr> &graphs,
const std::vector<IdArray> &always_preserve) {
return CompactGraphsGPU<int32_t>(graphs, always_preserve);
}
template<>
std::pair<std::vector<HeteroGraphPtr>, std::vector<IdArray>>
CompactGraphs<kDLGPU, int64_t>(
const std::vector<HeteroGraphPtr> &graphs,
const std::vector<IdArray> &always_preserve) {
return CompactGraphsGPU<int64_t>(graphs, always_preserve);
}
} // namespace transform
} // namespace dgl
|
38ba578e3a94f564a9c0484a20b1489e1b2cd690.hip | // !!! This is a file automatically generated by hipify!!!
#include "./c_runtime_api.h"
#include <cassert>
#include <cstdio>
#include <rocblas.h>
#include <cudnn.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <iostream>
#include <string>
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
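// Usage sketch: wrap each cuDNN API call so a non-success status reports the
// failing line and aborts, e.g. checkCUDNN(cudnnCreateTensorDescriptor(&desc));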
int setTensorDescriptor(cudnnTensorDescriptor_t activationDesc,
const int numDim,
const long shape[]) {
int batchSize = 0;
int channels = 0;
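// 2-D inputs (N, C) are described as NCHW tensors with H = W = 1;
// 4-D inputs map directly onto (N, C, H, W).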
switch (numDim) {
case 2:
batchSize = shape[0];
channels = shape[1];
checkCUDNN(cudnnSetTensor4dDescriptor(activationDesc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
batchSize,
channels, 1, 1));
break;
case 4:
batchSize = shape[0];
channels = shape[1];
int height = shape[2];
int width = shape[3];
checkCUDNN(cudnnSetTensor4dDescriptor(activationDesc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
batchSize,
channels,
height,
width));
break;
// TODO: handle other cases and errors
}
return 0;
}
cudnnHandle_t cudnn_handler = NULL;
int cudnnReLUForward(const DLArrayHandle input, DLArrayHandle output) {
const float *input_data = (const float *) input->data;
float *output_data = (float *) output->data;
assert(input->shape[0] == output->shape[0]);
assert(input->shape[1] == output->shape[1]);
if (!cudnn_handler) {
cudnnCreate(&cudnn_handler);
}
cudnnActivationDescriptor_t activation_descriptor;
checkCUDNN(cudnnCreateActivationDescriptor(&activation_descriptor));
checkCUDNN(cudnnSetActivationDescriptor(activation_descriptor,
CUDNN_ACTIVATION_RELU, // type of activation
CUDNN_PROPAGATE_NAN, // reluNanOpt
0)); //relu_coef
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
setTensorDescriptor(input_descriptor, input->ndim, input->shape);
cudnnTensorDescriptor_t output_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
setTensorDescriptor(output_descriptor, output->ndim, output->shape);
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnActivationForward(cudnn_handler,
activation_descriptor,
&alpha,
input_descriptor,
input_data,
&beta,
output_descriptor,
output_data));
cudnnDestroyActivationDescriptor(activation_descriptor);
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_descriptor);
return 0;
}
int cudnnConv2DForward(const DLArrayHandle input,
const DLArrayHandle filter,
const DLArrayHandle bias,
const int stride_height,
const int stride_width,
const int padding_height,
const int padding_width,
DLArrayHandle output) {
const int input_dim = input->ndim;
const int output_dim = output->ndim;
assert(input_dim == 4);
assert(output_dim == 4);
const int filter_shape = filter->ndim;
assert(filter_shape == 4);
const int num_filters = filter->shape[0];
const int num_outputs = filter->shape[1];
const int filter_height = filter->shape[2];
const int filter_width = filter->shape[3];
const int bias_dim = bias->ndim;
assert(bias_dim == 1);
assert(bias->shape[0] == num_filters);
const float *input_data = (const float *) input->data;
const float *filter_date = (const float *) filter->data;
const float *bias_data = (const float *) bias->data;
float *output_data = (float *) output->data;
if (!cudnn_handler) {
cudnnCreate(&cudnn_handler);
}
// creating input and output tensors
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
setTensorDescriptor(input_descriptor, input->ndim, input->shape);
cudnnTensorDescriptor_t output_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
setTensorDescriptor(output_descriptor, output->ndim, output->shape);
// create filter tensors
cudnnFilterDescriptor_t filter_descriptor;
checkCUDNN(cudnnCreateFilterDescriptor(&filter_descriptor));
checkCUDNN(cudnnSetFilter4dDescriptor(filter_descriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/num_filters,
/*in_channels=*/num_outputs,
/*kernel_height=*/filter_height,
/*kernel_width=*/filter_width));
// create convolution tensor
cudnnConvolutionDescriptor_t convolution_descriptor;
checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
/*pad_height=*/padding_height,
/*pad_width=*/padding_width,
/*vertical_stride=*/stride_height,
/*horizontal_stride=*/stride_width,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
/*computeType=*/CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t convolution_algorithm;
checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnn_handler,
input_descriptor,
filter_descriptor,
convolution_descriptor,
output_descriptor,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
/*memoryLimitInBytes=*/0,
&convolution_algorithm));
size_t workspace_bytes{0};
checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn_handler,
input_descriptor,
filter_descriptor,
convolution_descriptor,
output_descriptor,
convolution_algorithm,
&workspace_bytes));
//std::cerr << "Workspace size: " << (workspace_bytes / 1048576.0) << "MB" << std::endl;
assert(workspace_bytes > 0);
void *d_workspace{nullptr};
hipMalloc(&d_workspace, workspace_bytes);
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnConvolutionForward(cudnn_handler,
&alpha,
input_descriptor,
input_data,
filter_descriptor,
filter_date,
convolution_descriptor,
convolution_algorithm,
d_workspace,
workspace_bytes,
&beta,
output_descriptor,
output_data));
// adding bias tensor
cudnnTensorDescriptor_t bias_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&bias_descriptor));
//setTensorDescriptor(bias_descriptor, bias->ndim, bias->shape);
checkCUDNN(cudnnSetTensor4dDescriptor(bias_descriptor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
1,
num_filters,
1,
1));
checkCUDNN(cudnnAddTensor(cudnn_handler,
&alpha,
bias_descriptor,
bias_data,
&alpha,
output_descriptor,
output_data));
hipFree(d_workspace);
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_descriptor);
cudnnDestroyFilterDescriptor(filter_descriptor);
cudnnDestroyTensorDescriptor(bias_descriptor);
cudnnDestroyConvolutionDescriptor(convolution_descriptor);
return 0;
}
int cudnnConv2DBackwardBias(const DLArrayHandle output_grads,
DLArrayHandle bias_grads) {
const float *output_grads_data = (const float *) output_grads->data;
float *bias_grads_data = (float *) bias_grads->data;
const int bias_grads_dim = bias_grads->ndim;
assert(bias_grads_dim == 1);
const int num_filters = bias_grads->shape[0];
if (!cudnn_handler) {
cudnnCreate(&cudnn_handler);
}
// creating output_grads descriptor
cudnnTensorDescriptor_t output_grads_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_grads_descriptor));
setTensorDescriptor(output_grads_descriptor, output_grads->ndim, output_grads->shape);
// bias descriptor
cudnnTensorDescriptor_t bias_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&bias_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(bias_descriptor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
1,
num_filters,
1,
1));
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnConvolutionBackwardBias(cudnn_handler,
&alpha,
output_grads_descriptor,
output_grads_data,
&beta,
bias_descriptor,
bias_grads_data
));
cudnnDestroyTensorDescriptor(bias_descriptor);
cudnnDestroyTensorDescriptor(output_grads_descriptor);
return 0;
}
int cudnnConv2DBackwardData(const DLArrayHandle filter,
const DLArrayHandle output_grads,
const int stride_height,
const int stride_width,
const int padding_height,
const int padding_width,
DLArrayHandle data_grad) {
//const int input_dim = input->ndim;
const int data_grad_dim = data_grad->ndim;
//assert(input_dim == 4);
assert(data_grad_dim == 4);
const int filter_shape = filter->ndim;
assert(filter_shape == 4);
const int num_filters = filter->shape[0];
const int num_outputs = filter->shape[1];
const int filter_height = filter->shape[2];
const int filter_width = filter->shape[3];
//const float *input_data = (const float *) input->data;
const float *filter_date = (const float *) filter->data;
const float *output_grads_data = (const float *) output_grads->data;
float *data_grad_data = (float *) data_grad->data;
if (!cudnn_handler) {
cudnnCreate(&cudnn_handler);
}
// creating output_grads descriptor
cudnnTensorDescriptor_t output_grads_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_grads_descriptor));
setTensorDescriptor(output_grads_descriptor, output_grads->ndim, output_grads->shape);
// create convolution tensor
cudnnConvolutionDescriptor_t convolution_descriptor;
checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
/*pad_height=*/padding_height,
/*pad_width=*/padding_width,
/*vertical_stride=*/stride_height,
/*horizontal_stride=*/stride_width,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
/*computeType=*/CUDNN_DATA_FLOAT));
// create filter tensors
cudnnFilterDescriptor_t filter_descriptor;
checkCUDNN(cudnnCreateFilterDescriptor(&filter_descriptor));
checkCUDNN(cudnnSetFilter4dDescriptor(filter_descriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/num_filters,
/*in_channels=*/num_outputs,
/*kernel_height=*/filter_height,
/*kernel_width=*/filter_width));
cudnnTensorDescriptor_t data_grads_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&data_grads_descriptor));
setTensorDescriptor(data_grads_descriptor, data_grad->ndim, data_grad->shape);
cudnnConvolutionBwdDataAlgo_t backward_data_algo;
checkCUDNN(cudnnGetConvolutionBackwardDataAlgorithm(cudnn_handler,
filter_descriptor,
output_grads_descriptor,
convolution_descriptor,
data_grads_descriptor,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST,
0,
&backward_data_algo));
size_t workspace_bytes{0};
checkCUDNN(cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn_handler,
filter_descriptor,
output_grads_descriptor,
convolution_descriptor,
data_grads_descriptor,
backward_data_algo,
&workspace_bytes));
//std::cout << "workspace size: " << workspace_bytes << std::endl;
void *d_workspace{nullptr};
hipMalloc(&d_workspace, workspace_bytes);
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnConvolutionBackwardData(cudnn_handler,
&alpha,
filter_descriptor,
filter_date,
output_grads_descriptor,
output_grads_data,
convolution_descriptor,
backward_data_algo,
d_workspace,
workspace_bytes,
&beta,
data_grads_descriptor,
data_grad_data));
// Release resources
hipFree(d_workspace);
cudnnDestroyTensorDescriptor(data_grads_descriptor);
cudnnDestroyTensorDescriptor(output_grads_descriptor);
cudnnDestroyFilterDescriptor(filter_descriptor);
cudnnDestroyConvolutionDescriptor(convolution_descriptor);
//std::cout << "leaveing cudnnConv2DBackwardData" << std::endl;
return 0;
}
int cudnnConv2DBackwardFilter(const DLArrayHandle input,
const DLArrayHandle output_grads,
const int stride_height,
const int stride_width,
const int padding_height,
const int padding_width,
DLArrayHandle filter_grad) {
const int input_dim = input->ndim;
const int filter_dim = filter_grad->ndim;
//const int filter_grad_dim = filter_grad->ndim;
assert(input_dim == 4);
assert(filter_dim == 4);
//assert(filter_grad_dim == filter_dim);
const int num_filters = filter_grad->shape[0];
const int num_outputs = filter_grad->shape[1];
const int filter_height = filter_grad->shape[2];
const int filter_width = filter_grad->shape[3];
const float *input_data = (const float *) input->data;
const float *output_grads_data = (const float *) output_grads->data;
//const float *filter_date = (const float *) filter->data;
float *filter_grad_data = (float *) filter_grad->data;
//cudnnHandle_t cudnn;
//cudnnCreate(&cudnn);
if (!cudnn_handler) {
cudnnCreate(&cudnn_handler);
}
// creating input descriptor
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
setTensorDescriptor(input_descriptor, input->ndim, input->shape);
// creating output_grads descriptor
cudnnTensorDescriptor_t output_grads_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_grads_descriptor));
setTensorDescriptor(output_grads_descriptor, output_grads->ndim, output_grads->shape);
// create convolution tensor
cudnnConvolutionDescriptor_t convolution_descriptor;
checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
/*pad_height=*/padding_height,
/*pad_width=*/padding_width,
/*vertical_stride=*/stride_height,
/*horizontal_stride=*/stride_width,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
/*computeType=*/CUDNN_DATA_FLOAT));
// create filter tensors
cudnnFilterDescriptor_t filter_descriptor;
checkCUDNN(cudnnCreateFilterDescriptor(&filter_descriptor));
checkCUDNN(cudnnSetFilter4dDescriptor(filter_descriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/num_filters,
/*in_channels=*/num_outputs,
/*kernel_height=*/filter_height,
/*kernel_width=*/filter_width));
cudnnConvolutionBwdFilterAlgo_t backward_filter_algo;
checkCUDNN(cudnnGetConvolutionBackwardFilterAlgorithm(cudnn_handler,
input_descriptor,
output_grads_descriptor,
convolution_descriptor,
filter_descriptor,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST,
0,
&backward_filter_algo));
size_t workspace_bytes{0};
checkCUDNN(cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn_handler,
input_descriptor,
output_grads_descriptor,
convolution_descriptor,
filter_descriptor,
backward_filter_algo,
&workspace_bytes));
void *d_workspace{nullptr};
hipMalloc(&d_workspace, workspace_bytes);
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnConvolutionBackwardFilter(cudnn_handler,
&alpha,
input_descriptor,
input_data,
output_grads_descriptor,
output_grads_data,
convolution_descriptor,
backward_filter_algo,
d_workspace,
workspace_bytes,
&beta,
filter_descriptor,
filter_grad_data));
hipFree(d_workspace);
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_grads_descriptor);
cudnnDestroyFilterDescriptor(filter_descriptor);
cudnnDestroyConvolutionDescriptor(convolution_descriptor);
return 0;
}
int cudnnPoolForward(const DLArrayHandle input,
const int pooling_height,
const int pooling_width,
const int stride_height,
const int stride_width,
const char *mode,
DLArrayHandle output) {
const int input_dim = input->ndim;
const int output_dim = output->ndim;
assert(input_dim == 4);
assert(output_dim == 4);
const float *input_data = (const float *) input->data;
float *output_data = (float *) output->data;
cudnnPoolingMode_t pooling_mode = CUDNN_POOLING_MAX;
std::string str_mode(mode);
if (str_mode.compare("average") == 0) {
std::cout << str_mode << std::endl;
pooling_mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
std::cout << pooling_mode << std::endl;
}
if (!cudnn_handler) {
cudnnCreate(&cudnn_handler);
}
// creating input and output tensors
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
setTensorDescriptor(input_descriptor, input->ndim, input->shape);
cudnnTensorDescriptor_t output_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
setTensorDescriptor(output_descriptor, output->ndim, output->shape);
cudnnPoolingDescriptor_t pooling_descriptor;
checkCUDNN(cudnnCreatePoolingDescriptor(&pooling_descriptor));
checkCUDNN(cudnnSetPooling2dDescriptor(pooling_descriptor,
pooling_mode,
CUDNN_PROPAGATE_NAN,
pooling_height,
pooling_width,
0, // TODO: parameterize vertical padding
0, // TODO: parameterize horizontal padding
stride_height,
stride_width));
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnPoolingForward(cudnn_handler,
pooling_descriptor,
&alpha,
input_descriptor,
input_data,
&beta,
output_descriptor,
output_data));
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_descriptor);
cudnnDestroyPoolingDescriptor(pooling_descriptor);
return 0;
}
int cudnnPoolBackward(const DLArrayHandle input,
const DLArrayHandle output_grads,
const DLArrayHandle output,
const int pooling_height,
const int pooling_width,
const int stride_height,
const int stride_width,
const char *mode,
DLArrayHandle pool_grad) {
const int input_dim = input->ndim;
const int output_dim = output->ndim;
const int output_grads_dim = output_grads->ndim;
const int pool_grad_dim = pool_grad->ndim;
assert(input_dim == 4);
assert(output_dim == 4);
assert(output_grads_dim == 4);
assert(pool_grad_dim == 4);
const float *input_data = (const float*) input->data;
const float *output_data = (const float*) output->data;
const float *output_grads_data = (const float*) output_grads->data;
float *pool_grad_data = (float*) pool_grad->data;
cudnnPoolingMode_t pooling_mode = CUDNN_POOLING_MAX;
std::string str_mode(mode);
if (str_mode.compare("average") == 0) {
std::cout << str_mode << std::endl;
pooling_mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
std::cout << pooling_mode << std::endl;
}
if (!cudnn_handler) {
cudnnCreate(&cudnn_handler);
}
// input descriptor
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
setTensorDescriptor(input_descriptor, input->ndim, input->shape);
// output descriptor
cudnnTensorDescriptor_t output_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
setTensorDescriptor(output_descriptor, output->ndim, output->shape);
// output grad descriptor
cudnnTensorDescriptor_t output_grad_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_grad_descriptor));
setTensorDescriptor(output_grad_descriptor, output_grads->ndim, output_grads->shape);
// pool grad descriptor
cudnnTensorDescriptor_t pool_grad_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&pool_grad_descriptor));
setTensorDescriptor(pool_grad_descriptor, pool_grad->ndim, pool_grad->shape);
// TODO: reuse already defined pooling descriptor in forward pass
cudnnPoolingDescriptor_t pooling_descriptor;
checkCUDNN(cudnnCreatePoolingDescriptor(&pooling_descriptor));
checkCUDNN(cudnnSetPooling2dDescriptor(pooling_descriptor,
pooling_mode,
CUDNN_PROPAGATE_NAN,
pooling_height,
pooling_width,
0, // TODO: parameterize vertical padding
0, // TODO: parameterize horizontal padding
stride_height,
stride_width));
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnPoolingBackward(cudnn_handler,
pooling_descriptor,
&alpha,
output_descriptor,
output_data,
output_grad_descriptor,
output_grads_data,
input_descriptor,
input_data,
&beta,
pool_grad_descriptor,
pool_grad_data));
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_descriptor);
cudnnDestroyTensorDescriptor(output_grad_descriptor);
cudnnDestroyTensorDescriptor(pool_grad_descriptor);
cudnnDestroyPoolingDescriptor(pooling_descriptor);
return 0;
}
| 38ba578e3a94f564a9c0484a20b1489e1b2cd690.cu | #include "./c_runtime_api.h"
#include <cassert>
#include <cstdio>
#include <cublas_v2.h>
#include <cudnn.h>
#include <cuda_runtime.h>
#include <math.h>
#include <iostream>
#include <string>
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
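// Usage sketch: wrap each cuDNN API call so a non-success status reports the
// failing line and aborts, e.g. checkCUDNN(cudnnCreateTensorDescriptor(&desc));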
int setTensorDescriptor(cudnnTensorDescriptor_t activationDesc,
const int numDim,
const long shape[]) {
int batchSize = 0;
int channels = 0;
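// 2-D inputs (N, C) are described as NCHW tensors with H = W = 1;
// 4-D inputs map directly onto (N, C, H, W).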
switch (numDim) {
case 2:
batchSize = shape[0];
channels = shape[1];
checkCUDNN(cudnnSetTensor4dDescriptor(activationDesc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
batchSize,
channels, 1, 1));
break;
case 4:
batchSize = shape[0];
channels = shape[1];
int height = shape[2];
int width = shape[3];
checkCUDNN(cudnnSetTensor4dDescriptor(activationDesc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
batchSize,
channels,
height,
width));
break;
// TODO: handle other cases and errors
}
return 0;
}
cudnnHandle_t cudnn_handler = NULL;
int cudnnReLUForward(const DLArrayHandle input, DLArrayHandle output) {
const float *input_data = (const float *) input->data;
float *output_data = (float *) output->data;
assert(input->shape[0] == output->shape[0]);
assert(input->shape[1] == output->shape[1]);
if (!cudnn_handler) {
cudnnCreate(&cudnn_handler);
}
cudnnActivationDescriptor_t activation_descriptor;
checkCUDNN(cudnnCreateActivationDescriptor(&activation_descriptor));
checkCUDNN(cudnnSetActivationDescriptor(activation_descriptor,
CUDNN_ACTIVATION_RELU, // type of activation
CUDNN_PROPAGATE_NAN, // reluNanOpt
0)); //relu_coef
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
setTensorDescriptor(input_descriptor, input->ndim, input->shape);
cudnnTensorDescriptor_t output_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
setTensorDescriptor(output_descriptor, output->ndim, output->shape);
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnActivationForward(cudnn_handler,
activation_descriptor,
&alpha,
input_descriptor,
input_data,
&beta,
output_descriptor,
output_data));
cudnnDestroyActivationDescriptor(activation_descriptor);
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_descriptor);
return 0;
}
int cudnnConv2DForward(const DLArrayHandle input,
const DLArrayHandle filter,
const DLArrayHandle bias,
const int stride_height,
const int stride_width,
const int padding_height,
const int padding_width,
DLArrayHandle output) {
const int input_dim = input->ndim;
const int output_dim = output->ndim;
assert(input_dim == 4);
assert(output_dim == 4);
const int filter_shape = filter->ndim;
assert(filter_shape == 4);
const int num_filters = filter->shape[0];
const int num_outputs = filter->shape[1];
const int filter_height = filter->shape[2];
const int filter_width = filter->shape[3];
const int bias_dim = bias->ndim;
assert(bias_dim == 1);
assert(bias->shape[0] == num_filters);
const float *input_data = (const float *) input->data;
const float *filter_date = (const float *) filter->data;
const float *bias_data = (const float *) bias->data;
float *output_data = (float *) output->data;
if (!cudnn_handler) {
cudnnCreate(&cudnn_handler);
}
// creating input and output tensors
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
setTensorDescriptor(input_descriptor, input->ndim, input->shape);
cudnnTensorDescriptor_t output_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
setTensorDescriptor(output_descriptor, output->ndim, output->shape);
// create filter tensors
cudnnFilterDescriptor_t filter_descriptor;
checkCUDNN(cudnnCreateFilterDescriptor(&filter_descriptor));
checkCUDNN(cudnnSetFilter4dDescriptor(filter_descriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/num_filters,
/*in_channels=*/num_outputs,
/*kernel_height=*/filter_height,
/*kernel_width=*/filter_width));
// create convolution tensor
cudnnConvolutionDescriptor_t convolution_descriptor;
checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
/*pad_height=*/padding_height,
/*pad_width=*/padding_width,
/*vertical_stride=*/stride_height,
/*horizontal_stride=*/stride_width,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
/*computeType=*/CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t convolution_algorithm;
checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnn_handler,
input_descriptor,
filter_descriptor,
convolution_descriptor,
output_descriptor,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
/*memoryLimitInBytes=*/0,
&convolution_algorithm));
size_t workspace_bytes{0};
checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn_handler,
input_descriptor,
filter_descriptor,
convolution_descriptor,
output_descriptor,
convolution_algorithm,
&workspace_bytes));
//std::cerr << "Workspace size: " << (workspace_bytes / 1048576.0) << "MB" << std::endl;
assert(workspace_bytes > 0);
void *d_workspace{nullptr};
cudaMalloc(&d_workspace, workspace_bytes);
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnConvolutionForward(cudnn_handler,
&alpha,
input_descriptor,
input_data,
filter_descriptor,
filter_date,
convolution_descriptor,
convolution_algorithm,
d_workspace,
workspace_bytes,
&beta,
output_descriptor,
output_data));
// adding bias tensor
cudnnTensorDescriptor_t bias_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&bias_descriptor));
//setTensorDescriptor(bias_descriptor, bias->ndim, bias->shape);
checkCUDNN(cudnnSetTensor4dDescriptor(bias_descriptor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
1,
num_filters,
1,
1));
checkCUDNN(cudnnAddTensor(cudnn_handler,
&alpha,
bias_descriptor,
bias_data,
&alpha,
output_descriptor,
output_data));
cudaFree(d_workspace);
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_descriptor);
cudnnDestroyFilterDescriptor(filter_descriptor);
cudnnDestroyTensorDescriptor(bias_descriptor);
cudnnDestroyConvolutionDescriptor(convolution_descriptor);
return 0;
}
int cudnnConv2DBackwardBias(const DLArrayHandle output_grads,
DLArrayHandle bias_grads) {
const float *output_grads_data = (const float *) output_grads->data;
float *bias_grads_data = (float *) bias_grads->data;
const int bias_grads_dim = bias_grads->ndim;
assert(bias_grads_dim == 1);
const int num_filters = bias_grads->shape[0];
if (!cudnn_handler) {
cudnnCreate(&cudnn_handler);
}
// creating output_grads descriptor
cudnnTensorDescriptor_t output_grads_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_grads_descriptor));
setTensorDescriptor(output_grads_descriptor, output_grads->ndim, output_grads->shape);
// bias descriptor
cudnnTensorDescriptor_t bias_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&bias_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(bias_descriptor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
1,
num_filters,
1,
1));
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnConvolutionBackwardBias(cudnn_handler,
&alpha,
output_grads_descriptor,
output_grads_data,
&beta,
bias_descriptor,
bias_grads_data
));
cudnnDestroyTensorDescriptor(bias_descriptor);
cudnnDestroyTensorDescriptor(output_grads_descriptor);
return 0;
}
int cudnnConv2DBackwardData(const DLArrayHandle filter,
const DLArrayHandle output_grads,
const int stride_height,
const int stride_width,
const int padding_height,
const int padding_width,
DLArrayHandle data_grad) {
//const int input_dim = input->ndim;
const int data_grad_dim = data_grad->ndim;
//assert(input_dim == 4);
assert(data_grad_dim == 4);
const int filter_shape = filter->ndim;
assert(filter_shape == 4);
const int num_filters = filter->shape[0];
const int num_outputs = filter->shape[1];
const int filter_height = filter->shape[2];
const int filter_width = filter->shape[3];
//const float *input_data = (const float *) input->data;
const float *filter_date = (const float *) filter->data;
const float *output_grads_data = (const float *) output_grads->data;
float *data_grad_data = (float *) data_grad->data;
if (!cudnn_handler) {
cudnnCreate(&cudnn_handler);
}
// creating output_grads descriptor
cudnnTensorDescriptor_t output_grads_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_grads_descriptor));
setTensorDescriptor(output_grads_descriptor, output_grads->ndim, output_grads->shape);
// create convolution tensor
cudnnConvolutionDescriptor_t convolution_descriptor;
checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
/*pad_height=*/padding_height,
/*pad_width=*/padding_width,
/*vertical_stride=*/stride_height,
/*horizontal_stride=*/stride_width,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
/*computeType=*/CUDNN_DATA_FLOAT));
// create filter tensors
cudnnFilterDescriptor_t filter_descriptor;
checkCUDNN(cudnnCreateFilterDescriptor(&filter_descriptor));
checkCUDNN(cudnnSetFilter4dDescriptor(filter_descriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/num_filters,
/*in_channels=*/num_outputs,
/*kernel_height=*/filter_height,
/*kernel_width=*/filter_width));
cudnnTensorDescriptor_t data_grads_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&data_grads_descriptor));
setTensorDescriptor(data_grads_descriptor, data_grad->ndim, data_grad->shape);
cudnnConvolutionBwdDataAlgo_t backward_data_algo;
checkCUDNN(cudnnGetConvolutionBackwardDataAlgorithm(cudnn_handler,
filter_descriptor,
output_grads_descriptor,
convolution_descriptor,
data_grads_descriptor,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST,
0,
&backward_data_algo));
size_t workspace_bytes{0};
checkCUDNN(cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn_handler,
filter_descriptor,
output_grads_descriptor,
convolution_descriptor,
data_grads_descriptor,
backward_data_algo,
&workspace_bytes));
//std::cout << "workspace size: " << workspace_bytes << std::endl;
void *d_workspace{nullptr};
cudaMalloc(&d_workspace, workspace_bytes);
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnConvolutionBackwardData(cudnn_handler,
&alpha,
filter_descriptor,
filter_date,
output_grads_descriptor,
output_grads_data,
convolution_descriptor,
backward_data_algo,
d_workspace,
workspace_bytes,
&beta,
data_grads_descriptor,
data_grad_data));
// Release resources
cudaFree(d_workspace);
cudnnDestroyTensorDescriptor(data_grads_descriptor);
cudnnDestroyTensorDescriptor(output_grads_descriptor);
cudnnDestroyFilterDescriptor(filter_descriptor);
cudnnDestroyConvolutionDescriptor(convolution_descriptor);
//std::cout << "leaveing cudnnConv2DBackwardData" << std::endl;
return 0;
}
int cudnnConv2DBackwardFilter(const DLArrayHandle input,
const DLArrayHandle output_grads,
const int stride_height,
const int stride_width,
const int padding_height,
const int padding_width,
DLArrayHandle filter_grad) {
const int input_dim = input->ndim;
const int filter_dim = filter_grad->ndim;
//const int filter_grad_dim = filter_grad->ndim;
assert(input_dim == 4);
assert(filter_dim == 4);
//assert(filter_grad_dim == filter_dim);
const int num_filters = filter_grad->shape[0];
const int num_outputs = filter_grad->shape[1];
const int filter_height = filter_grad->shape[2];
const int filter_width = filter_grad->shape[3];
const float *input_data = (const float *) input->data;
const float *output_grads_data = (const float *) output_grads->data;
//const float *filter_date = (const float *) filter->data;
float *filter_grad_data = (float *) filter_grad->data;
//cudnnHandle_t cudnn;
//cudnnCreate(&cudnn);
if (!cudnn_handler) {
cudnnCreate(&cudnn_handler);
}
// creating input descriptor
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
setTensorDescriptor(input_descriptor, input->ndim, input->shape);
// creating output_grads descriptor
cudnnTensorDescriptor_t output_grads_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_grads_descriptor));
setTensorDescriptor(output_grads_descriptor, output_grads->ndim, output_grads->shape);
// create convolution tensor
cudnnConvolutionDescriptor_t convolution_descriptor;
checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
/*pad_height=*/padding_height,
/*pad_width=*/padding_width,
/*vertical_stride=*/stride_height,
/*horizontal_stride=*/stride_width,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
/*computeType=*/CUDNN_DATA_FLOAT));
// create filter tensors
cudnnFilterDescriptor_t filter_descriptor;
checkCUDNN(cudnnCreateFilterDescriptor(&filter_descriptor));
checkCUDNN(cudnnSetFilter4dDescriptor(filter_descriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/num_filters,
/*in_channels=*/num_outputs,
/*kernel_height=*/filter_height,
/*kernel_width=*/filter_width));
cudnnConvolutionBwdFilterAlgo_t backward_filter_algo;
checkCUDNN(cudnnGetConvolutionBackwardFilterAlgorithm(cudnn_handler,
input_descriptor,
output_grads_descriptor,
convolution_descriptor,
filter_descriptor,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST,
0,
&backward_filter_algo));
size_t workspace_bytes{0};
checkCUDNN(cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn_handler,
input_descriptor,
output_grads_descriptor,
convolution_descriptor,
filter_descriptor,
backward_filter_algo,
&workspace_bytes));
void *d_workspace{nullptr};
cudaMalloc(&d_workspace, workspace_bytes);
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnConvolutionBackwardFilter(cudnn_handler,
&alpha,
input_descriptor,
input_data,
output_grads_descriptor,
output_grads_data,
convolution_descriptor,
backward_filter_algo,
d_workspace,
workspace_bytes,
&beta,
filter_descriptor,
filter_grad_data));
cudaFree(d_workspace);
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_grads_descriptor);
cudnnDestroyFilterDescriptor(filter_descriptor);
cudnnDestroyConvolutionDescriptor(convolution_descriptor);
return 0;
}
int cudnnPoolForward(const DLArrayHandle input,
const int pooling_height,
const int pooling_width,
const int stride_height,
const int stride_width,
const char *mode,
DLArrayHandle output) {
const int input_dim = input->ndim;
const int output_dim = output->ndim;
assert(input_dim == 4);
assert(output_dim == 4);
const float *input_data = (const float *) input->data;
float *output_data = (float *) output->data;
cudnnPoolingMode_t pooling_mode = CUDNN_POOLING_MAX;
std::string str_mode(mode);
if (str_mode.compare("average") == 0) {
std::cout << str_mode << std::endl;
pooling_mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
std::cout << pooling_mode << std::endl;
}
if (!cudnn_handler) {
cudnnCreate(&cudnn_handler);
}
// creating input and output tensors
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
setTensorDescriptor(input_descriptor, input->ndim, input->shape);
cudnnTensorDescriptor_t output_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
setTensorDescriptor(output_descriptor, output->ndim, output->shape);
cudnnPoolingDescriptor_t pooling_descriptor;
checkCUDNN(cudnnCreatePoolingDescriptor(&pooling_descriptor));
checkCUDNN(cudnnSetPooling2dDescriptor(pooling_descriptor,
pooling_mode,
CUDNN_PROPAGATE_NAN,
pooling_height,
pooling_width,
0, // TODO: parameterize vertical padding
0, // TODO: parameterize horizontal padding
stride_height,
stride_width));
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnPoolingForward(cudnn_handler,
pooling_descriptor,
&alpha,
input_descriptor,
input_data,
&beta,
output_descriptor,
output_data));
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_descriptor);
cudnnDestroyPoolingDescriptor(pooling_descriptor);
return 0;
}
int cudnnPoolBackward(const DLArrayHandle input,
const DLArrayHandle output_grads,
const DLArrayHandle output,
const int pooling_height,
const int pooling_width,
const int stride_height,
const int stride_width,
const char *mode,
DLArrayHandle pool_grad) {
const int input_dim = input->ndim;
const int output_dim = output->ndim;
const int output_grads_dim = output_grads->ndim;
const int pool_grad_dim = pool_grad->ndim;
assert(input_dim == 4);
assert(output_dim == 4);
assert(output_grads_dim == 4);
assert(pool_grad_dim == 4);
const float *input_data = (const float*) input->data;
const float *output_data = (const float*) output->data;
const float *output_grads_data = (const float*) output_grads->data;
float *pool_grad_data = (float*) pool_grad->data;
cudnnPoolingMode_t pooling_mode = CUDNN_POOLING_MAX;
std::string str_mode(mode);
if (str_mode.compare("average") == 0) {
std::cout << str_mode << std::endl;
pooling_mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
std::cout << pooling_mode << std::endl;
}
if (!cudnn_handler) {
cudnnCreate(&cudnn_handler);
}
// input descriptor
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
setTensorDescriptor(input_descriptor, input->ndim, input->shape);
// output descriptor
cudnnTensorDescriptor_t output_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
setTensorDescriptor(output_descriptor, output->ndim, output->shape);
// output grad descriptor
cudnnTensorDescriptor_t output_grad_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_grad_descriptor));
setTensorDescriptor(output_grad_descriptor, output_grads->ndim, output_grads->shape);
// pool grad descriptor
cudnnTensorDescriptor_t pool_grad_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&pool_grad_descriptor));
setTensorDescriptor(pool_grad_descriptor, pool_grad->ndim, pool_grad->shape);
// TODO: reuse already defined pooling descriptor in forward pass
cudnnPoolingDescriptor_t pooling_descriptor;
checkCUDNN(cudnnCreatePoolingDescriptor(&pooling_descriptor));
checkCUDNN(cudnnSetPooling2dDescriptor(pooling_descriptor,
pooling_mode,
CUDNN_PROPAGATE_NAN,
pooling_height,
pooling_width,
0, // TODO: parameterize vertical padding
0, // TODO: parameterize horizontal padding
stride_height,
stride_width));
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnPoolingBackward(cudnn_handler,
pooling_descriptor,
&alpha,
output_descriptor,
output_data,
output_grad_descriptor,
output_grads_data,
input_descriptor,
input_data,
&beta,
pool_grad_descriptor,
pool_grad_data));
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_descriptor);
cudnnDestroyTensorDescriptor(output_grad_descriptor);
cudnnDestroyTensorDescriptor(pool_grad_descriptor);
cudnnDestroyPoolingDescriptor(pooling_descriptor);
return 0;
}
|
3ae792c187d8707487729b5ded8c93a839eb75d9.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 3ae792c187d8707487729b5ded8c93a839eb75d9.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
1ecf43697dfd8c1e62bd9cf9213bfe7c8626ca8f.hip | // !!! This is a file automatically generated by hipify!!!
#define GRB_USE_CUDA
#define private public
#include <iostream>
#include <algorithm>
#include <string>
#include <cstdio>
#include <cstdlib>
// #include <hip/hip_runtime_api.h>
#include <boost/program_options.hpp>
#include "graphblas/graphblas.hpp"
#include "graphblas/algorithm/gc.hpp"
#include "test/test.hpp"
bool debug_;
bool memory_;
int main(int argc, char** argv) {
std::vector<graphblas::Index> row_indices;
std::vector<graphblas::Index> col_indices;
std::vector<int> values;
graphblas::Index nrows, ncols, nvals;
// Parse arguments
bool debug;
bool transpose;
bool mtxinfo;
int directed;
int niter;
int seed;
int max_colors;
int gc_algo;
char* dat_name;
po::variables_map vm;
// Read in sparse matrix
if (argc < 2) {
fprintf(stderr, "Usage: %s [matrix-market-filename]\n", argv[0]);
exit(1);
} else {
parseArgs(argc, argv, &vm);
debug = vm["debug" ].as<bool>();
transpose = vm["transpose"].as<bool>();
mtxinfo = vm["mtxinfo" ].as<bool>();
directed = vm["directed" ].as<int>();
niter = vm["niter" ].as<int>();
seed = vm["seed" ].as<int>();
max_colors = vm["maxcolors"].as<int>();
gc_algo = vm["gcalgo" ].as<int>();
// This is an imperfect solution, because this should happen in
// desc.loadArgs(vm) instead of application code!
// TODO(@ctcyang): fix this
readMtx(argv[argc-1], &row_indices, &col_indices, &values, &nrows, &ncols,
&nvals, directed, mtxinfo, &dat_name);
}
// Descriptor desc
graphblas::Descriptor desc;
CHECK(desc.loadArgs(vm));
if (transpose)
CHECK(desc.toggle(graphblas::GrB_INP1));
// Matrix A
graphblas::Matrix<int> a(nrows, ncols);
values.clear();
values.resize(nvals, 1.f);
CHECK(a.build(&row_indices, &col_indices, &values, nvals, GrB_NULL,
dat_name));
CHECK(a.nrows(&nrows));
CHECK(a.ncols(&ncols));
CHECK(a.nvals(&nvals));
if (debug) CHECK(a.print());
// Vector v
graphblas::Vector<int> v(nrows);
// Cpu graph coloring
CpuTimer gc_cpu;
std::vector<int> h_gc_cpu(nrows, 0);
int depth = 10000;
gc_cpu.Start();
int d = graphblas::algorithm::gcCpu(seed, &a, &h_gc_cpu, max_colors);
gc_cpu.Stop();
graphblas::algorithm::verifyGc(&a, h_gc_cpu);
// Warmup
CpuTimer warmup;
warmup.Start();
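// gcalgo selects the coloring routine: 0 -> gcJP, 1 -> gcMIS, 2 -> gcIS
// (all declared in graphblas/algorithm/gc.hpp); any other value is rejected.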
if (gc_algo == 0)
graphblas::algorithm::gcJP(&v, &a, seed, max_colors, &desc);
else if (gc_algo == 1)
graphblas::algorithm::gcMIS(&v, &a, seed, max_colors, &desc);
else if (gc_algo == 2)
graphblas::algorithm::gcIS(&v, &a, seed, max_colors, &desc);
else
std::cout << "Error: Invalid graph coloring algorithm selected!\n";
warmup.Stop();
std::vector<int> h_gc_gpu;
CHECK(v.extractTuples(&h_gc_gpu, &nrows));
graphblas::algorithm::verifyGc(&a, h_gc_gpu);
// Benchmark
graphblas::Vector<int> y(nrows);
CpuTimer vxm_gpu;
// hipProfilerStart();
vxm_gpu.Start();
float tight = 0.f;
float val;
for (int i = 0; i < niter; i++) {
if (gc_algo == 0) {
val = graphblas::algorithm::gcJP(&v, &a, seed, max_colors, &desc);
} else if (gc_algo == 1) {
val = graphblas::algorithm::gcMIS(&v, &a, seed, max_colors, &desc);
} else if (gc_algo == 2) {
val = graphblas::algorithm::gcIS(&v, &a, seed, max_colors, &desc);
} else {
std::cout << "Error: Invalid graph coloring algorithm selected!\n";
break;
}
tight += val;
}
// hipProfilerStop();
vxm_gpu.Stop();
float flop = 0;
std::cout << "cpu, " << gc_cpu.ElapsedMillis() << ", \n";
std::cout << "warmup, " << warmup.ElapsedMillis() << ", " <<
flop/warmup.ElapsedMillis()/1000000.0 << "\n";
float elapsed_vxm = vxm_gpu.ElapsedMillis();
std::cout << "tight, " << tight/niter << "\n";
std::cout << "vxm, " << elapsed_vxm/niter << "\n";
if (niter) {
std::vector<int> h_gc_gpu2;
graphblas::algorithm::verifyGc(&a, h_gc_gpu);
}
return 0;
}
| 1ecf43697dfd8c1e62bd9cf9213bfe7c8626ca8f.cu | #define GRB_USE_CUDA
#define private public
#include <iostream>
#include <algorithm>
#include <string>
#include <cstdio>
#include <cstdlib>
// #include <cuda_profiler_api.h>
#include <boost/program_options.hpp>
#include "graphblas/graphblas.hpp"
#include "graphblas/algorithm/gc.hpp"
#include "test/test.hpp"
bool debug_;
bool memory_;
int main(int argc, char** argv) {
std::vector<graphblas::Index> row_indices;
std::vector<graphblas::Index> col_indices;
std::vector<int> values;
graphblas::Index nrows, ncols, nvals;
// Parse arguments
bool debug;
bool transpose;
bool mtxinfo;
int directed;
int niter;
int seed;
int max_colors;
int gc_algo;
char* dat_name;
po::variables_map vm;
// Read in sparse matrix
if (argc < 2) {
fprintf(stderr, "Usage: %s [matrix-market-filename]\n", argv[0]);
exit(1);
} else {
parseArgs(argc, argv, &vm);
debug = vm["debug" ].as<bool>();
transpose = vm["transpose"].as<bool>();
mtxinfo = vm["mtxinfo" ].as<bool>();
directed = vm["directed" ].as<int>();
niter = vm["niter" ].as<int>();
seed = vm["seed" ].as<int>();
max_colors = vm["maxcolors"].as<int>();
gc_algo = vm["gcalgo" ].as<int>();
// This is an imperfect solution, because this should happen in
// desc.loadArgs(vm) instead of application code!
// TODO(@ctcyang): fix this
readMtx(argv[argc-1], &row_indices, &col_indices, &values, &nrows, &ncols,
&nvals, directed, mtxinfo, &dat_name);
}
// Descriptor desc
graphblas::Descriptor desc;
CHECK(desc.loadArgs(vm));
if (transpose)
CHECK(desc.toggle(graphblas::GrB_INP1));
// Matrix A
graphblas::Matrix<int> a(nrows, ncols);
values.clear();
values.resize(nvals, 1.f);
CHECK(a.build(&row_indices, &col_indices, &values, nvals, GrB_NULL,
dat_name));
CHECK(a.nrows(&nrows));
CHECK(a.ncols(&ncols));
CHECK(a.nvals(&nvals));
if (debug) CHECK(a.print());
// Vector v
graphblas::Vector<int> v(nrows);
// Cpu graph coloring
CpuTimer gc_cpu;
std::vector<int> h_gc_cpu(nrows, 0);
int depth = 10000;
gc_cpu.Start();
int d = graphblas::algorithm::gcCpu(seed, &a, &h_gc_cpu, max_colors);
gc_cpu.Stop();
graphblas::algorithm::verifyGc(&a, h_gc_cpu);
// Warmup
CpuTimer warmup;
warmup.Start();
if (gc_algo == 0)
graphblas::algorithm::gcJP(&v, &a, seed, max_colors, &desc);
else if (gc_algo == 1)
graphblas::algorithm::gcMIS(&v, &a, seed, max_colors, &desc);
else if (gc_algo == 2)
graphblas::algorithm::gcIS(&v, &a, seed, max_colors, &desc);
else
std::cout << "Error: Invalid graph coloring algorithm selected!\n";
warmup.Stop();
std::vector<int> h_gc_gpu;
CHECK(v.extractTuples(&h_gc_gpu, &nrows));
graphblas::algorithm::verifyGc(&a, h_gc_gpu);
// Benchmark
graphblas::Vector<int> y(nrows);
CpuTimer vxm_gpu;
// cudaProfilerStart();
vxm_gpu.Start();
float tight = 0.f;
float val;
for (int i = 0; i < niter; i++) {
if (gc_algo == 0) {
val = graphblas::algorithm::gcJP(&v, &a, seed, max_colors, &desc);
} else if (gc_algo == 1) {
val = graphblas::algorithm::gcMIS(&v, &a, seed, max_colors, &desc);
} else if (gc_algo == 2) {
val = graphblas::algorithm::gcIS(&v, &a, seed, max_colors, &desc);
} else {
std::cout << "Error: Invalid graph coloring algorithm selected!\n";
break;
}
tight += val;
}
// cudaProfilerStop();
vxm_gpu.Stop();
float flop = 0;
std::cout << "cpu, " << gc_cpu.ElapsedMillis() << ", \n";
std::cout << "warmup, " << warmup.ElapsedMillis() << ", " <<
flop/warmup.ElapsedMillis()/1000000.0 << "\n";
float elapsed_vxm = vxm_gpu.ElapsedMillis();
std::cout << "tight, " << tight/niter << "\n";
std::cout << "vxm, " << elapsed_vxm/niter << "\n";
  if (niter) {
    std::vector<int> h_gc_gpu2;
    CHECK(v.extractTuples(&h_gc_gpu2, &nrows));
    graphblas::algorithm::verifyGc(&a, h_gc_gpu2);
  }
return 0;
}
|
18c8ccb2ffb960d2ef07dba92477493b0ad51b15.hip | // !!! This is a file automatically generated by hipify!!!
/*
#include <iostream>
#include <stdlib.h>
#include <hip/hip_runtime.h>
//function called by the host and executed by the device
__global__ void suma(int a, int b, int c, int *resultado){
*resultado = a + b + c;
}
//function called and executed by the host --> __host__
int main(int argc, char ** argv){
int n1 = 3, n2 = 5, n3 = 3;
int *hst_c, *dev_c;
//allocate memory on the host and the device
hst_c = (int*)malloc(sizeof(int));
hipMalloc((void**)&dev_c, sizeof(int));
//call the kernel function, passing it the data
suma << <1, 1 >> >(n1, n2, n3, dev_c);
//copy the data from the device back to the host
hipMemcpy(hst_c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
printf("El resultado de la operacion es \n%2d + %2d + %2d = %2d \n", n1, n2, n3, *hst_c);
printf("\n pulse INTRO para finalizar");
//flush the input buffer
fflush(stdin);
char intro = getchar();
return 0;
}
*/ | 18c8ccb2ffb960d2ef07dba92477493b0ad51b15.cu | /*
#include <iostream>
#include <stdlib.h>
#include <cuda_runtime.h>
//function called by the host and executed by the device
__global__ void suma(int a, int b, int c, int *resultado){
*resultado = a + b + c;
}
//function called and executed by the host --> __host__
int main(int argc, char ** argv){
int n1 = 3, n2 = 5, n3 = 3;
int *hst_c, *dev_c;
//allocate memory on the host and the device
hst_c = (int*)malloc(sizeof(int));
cudaMalloc((void**)&dev_c, sizeof(int));
//call the kernel function, passing it the data
suma << <1, 1 >> >(n1, n2, n3, dev_c);
//copy the data from the device back to the host
cudaMemcpy(hst_c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
printf("El resultado de la operacion es \n%2d + %2d + %2d = %2d \n", n1, n2, n3, *hst_c);
printf("\n pulse INTRO para finalizar");
//flush the input buffer
fflush(stdin);
char intro = getchar();
return 0;
}
*/ |
a2ceddb4fd83acd6a45cf5437e00a1cb4bd67a43.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/rrelu_layer.hpp"
namespace caffe {
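// Randomized leaky ReLU (RReLU): in the TRAIN phase every element draws its own
// slope uniformly from [negative_slope_lower, negative_slope_upper] (stored in
// rand_vec_ and passed to the kernels as "mask"), and negative inputs are scaled
// by 1/slope; in the TEST phase the fixed factor 2/(lower+upper) is used instead.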
template <typename Dtype>
__global__ void RReLUForward(const int n, const Dtype* in, Dtype* out,
const Dtype *negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * Dtype(1.0) / negative_slope[index];
}
}
template <typename Dtype>
__global__ void RReLUForward(const int n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
}
}
template <typename Dtype>
void RReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype negative_slope_lower = this->layer_param_.rrelu_param().negative_slope_lower();
Dtype negative_slope_upper = this->layer_param_.rrelu_param().negative_slope_upper();
if (this->phase_ == TRAIN) {
Dtype* mask =
static_cast<Dtype*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, negative_slope_lower, negative_slope_upper,mask);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( RReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data, mask);
CUDA_POST_KERNEL_CHECK;
}
else
{
Dtype negative_slope = Dtype(2)/(negative_slope_lower+negative_slope_upper);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( RReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data, negative_slope);
CUDA_POST_KERNEL_CHECK;
}
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
template <typename Dtype>
__global__ void RReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, const Dtype *negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * Dtype(1.0) / negative_slope[index]);
}
}
template <typename Dtype>
__global__ void RReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * negative_slope);
}
}
template <typename Dtype>
void RReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype negative_slope_lower = this->layer_param_.rrelu_param().negative_slope_lower();
Dtype negative_slope_upper = this->layer_param_.rrelu_param().negative_slope_upper();
if (this->phase_ == TRAIN) {
Dtype* mask =
static_cast<Dtype*>(rand_vec_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( RReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_data, bottom_diff, mask);
CUDA_POST_KERNEL_CHECK;
}
else
{
Dtype negative_slope = Dtype(2)/(negative_slope_lower+negative_slope_upper);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( RReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(RReLULayer);
} // namespace caffe
| a2ceddb4fd83acd6a45cf5437e00a1cb4bd67a43.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/rrelu_layer.hpp"
namespace caffe {
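// Randomized leaky ReLU (RReLU): in the TRAIN phase every element draws its own
// slope uniformly from [negative_slope_lower, negative_slope_upper] (stored in
// rand_vec_ and passed to the kernels as "mask"), and negative inputs are scaled
// by 1/slope; in the TEST phase the fixed factor 2/(lower+upper) is used instead.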
template <typename Dtype>
__global__ void RReLUForward(const int n, const Dtype* in, Dtype* out,
const Dtype *negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * Dtype(1.0) / negative_slope[index];
}
}
template <typename Dtype>
__global__ void RReLUForward(const int n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
}
}
template <typename Dtype>
void RReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype negative_slope_lower = this->layer_param_.rrelu_param().negative_slope_lower();
Dtype negative_slope_upper = this->layer_param_.rrelu_param().negative_slope_upper();
if (this->phase_ == TRAIN) {
Dtype* mask =
static_cast<Dtype*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, negative_slope_lower, negative_slope_upper,mask);
// NOLINT_NEXT_LINE(whitespace/operators)
RReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data, mask);
CUDA_POST_KERNEL_CHECK;
}
else
{
Dtype negative_slope = Dtype(2)/(negative_slope_lower+negative_slope_upper);
// NOLINT_NEXT_LINE(whitespace/operators)
RReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data, negative_slope);
CUDA_POST_KERNEL_CHECK;
}
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
template <typename Dtype>
__global__ void RReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, const Dtype *negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * Dtype(1.0) / negative_slope[index]);
}
}
template <typename Dtype>
__global__ void RReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * negative_slope);
}
}
template <typename Dtype>
void RReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype negative_slope_lower = this->layer_param_.rrelu_param().negative_slope_lower();
Dtype negative_slope_upper = this->layer_param_.rrelu_param().negative_slope_upper();
if (this->phase_ == TRAIN) {
Dtype* mask =
static_cast<Dtype*>(rand_vec_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
RReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff, mask);
CUDA_POST_KERNEL_CHECK;
}
else
{
Dtype negative_slope = Dtype(2)/(negative_slope_lower+negative_slope_upper);
// NOLINT_NEXT_LINE(whitespace/operators)
RReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(RReLULayer);
} // namespace caffe
|
2797c90c923aac5ead56026e35c66afa2df3fcf4.hip | // !!! This is a file automatically generated by hipify!!!
// -----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// -----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// -----------------------------------------------------------------------------
/**
* @file
* test_mst.cu
*
* @brief Simple test driver for computing Minimum Spanning Tree.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <utility>
#include <iostream>
#include <cstdlib>
#include <algorithm>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utilities
#include <gunrock/graphio/market.cuh>
// MST includes
#include <gunrock/app/mst/mst_enactor.cuh>
#include <gunrock/app/mst/mst_problem.cuh>
#include <gunrock/app/mst/mst_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// CPU Kruskal MST reference
#include <boost/config.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/kruskal_min_spanning_tree.hpp>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::mst;
////////////////////////////////////////////////////////////////////////////////
// defines, constants, globals
bool g_verbose;
bool g_undirected;
bool g_quick;
bool g_stream_from_host;
////////////////////////////////////////////////////////////////////////////////
// housekeeping routines
void Usage()
{
printf(
"\ntest_mst <graph type> <graph type args> [--device=<device_index>] "
"[--instrumented] [--quick] "
"[--v]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-format graph of directed/undirected\n"
" edges from stdin (or from the optionally-specified file)\n"
" --device=<device_index> Set GPU device for running the graph primitive\n"
" --instrumented If set then kernels keep track of queue-search_depth\n"
" and barrier duty (a relative indicator of load imbalance)\n"
" --quick If set will skip the CPU validation code\n"
" --v If set will enable DEBUG mode\n\n"
" --------------------------------------------------------------\n"
" To make sure two graphs have same weight value for each edge \n"
" we have to change ll_value = rand()%%64 in market.cuh file to \n"
" some NON-RANDOM value if the original graph does NOT contain \n"
" weight per edge. Note it only support FULLY-CONNECTED graphs \n"
" --------------------------------------------------------------\n");
}
/**
* @brief Displays the MST result
*
*/
////////////////////////////////////////////////////////////////////////////////
template<typename VertexId, typename Value, typename SizeT>
void DisplaySolution(const Csr<VertexId, Value, SizeT> &graph, int *mst_output)
{
fflush(stdout);
// find source vertex ids for display results
VertexId *source = new VertexId[graph.edges];
for (int i = 0; i < graph.nodes; ++i)
{
for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j)
{
source[j] = i;
}
}
// print source-destination pairs of minimum spanning tree edges
printf("src dst\n");
for (int i = 0; i < graph.edges; ++i)
{
if (mst_output[i] == 1)
{
printf("%d %d\n", source[i], graph.column_indices[i]);
}
}
// clean up if necessary
if (source) { delete [] source; }
}
/**
* @brief A simple CPU-based reference MST implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] edge_values weight value associated with per edge
* @param[in] graph reference to the CSR graph we process on
*
* \return long long int variable which indicates the total weight of the computed minimum spanning tree
*/
////////////////////////////////////////////////////////////////////////////////
template<typename VertexId, typename Value, typename SizeT>
long long int SimpleReferenceMST(
const Value *edge_values, const Csr<VertexId, Value, SizeT> &graph)
{
printf("REFERENCE TEST - #NODES: %d #EDGES: %d\n", graph.nodes, graph.edges);
// Kruskal minimum spanning tree preparations
using namespace boost;
typedef adjacency_list < vecS, vecS, undirectedS,
no_property, property < edge_weight_t, int > > Graph;
typedef graph_traits < Graph >::edge_descriptor Edge;
typedef graph_traits < Graph >::vertex_descriptor Vertex;
typedef std::pair<int, int> E;
E *edge_pairs = new E[graph.edges];
int idx = 0;
for (int i = 0; i < graph.nodes; ++i)
{
for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j)
{
edge_pairs[idx++] = std::make_pair(i, graph.column_indices[j]);
}
}
Graph g(edge_pairs, edge_pairs + graph.edges, edge_values, graph.nodes);
property_map < Graph, edge_weight_t >::type weight = get(edge_weight, g);
std::vector < Edge > spanning_tree;
CpuTimer cpu_timer; // record the kernel running time
cpu_timer.Start();
// compute reference using kruskal_min_spanning_tree algorithm
kruskal_minimum_spanning_tree(g, std::back_inserter(spanning_tree));
cpu_timer.Stop();
float elapsed_cpu = cpu_timer.ElapsedMillis();
// analyze reference results
SizeT num_selected_cpu = 0;
long long int total_weight_cpu = 0;
if (graph.nodes <= 50) printf("CPU Minimum Spanning Tree\n");
for (std::vector < Edge >::iterator ei = spanning_tree.begin();
ei != spanning_tree.end(); ++ei)
{
if (graph.nodes <= 50)
{
// print the edge pairs in the minimum spanning tree
printf("%ld %ld\n", source(*ei, g), target(*ei, g));
//printf(" with weight of %d\n", weight[*ei]);
}
++num_selected_cpu;
total_weight_cpu += weight[*ei];
}
// clean up if necessary
if (edge_pairs) { delete [] edge_pairs; }
printf("CPU - Computation Complete in %lf msec.\n", elapsed_cpu);
printf("CPU - Number of Edges in MST: %d\n", num_selected_cpu);
return total_weight_cpu;
}
/**
* @brief Run MST tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] graph_gpu the CSR graph we send to GPU to process
* @param[in] graph_cpu the CSR graph we process on CPU
* @param[in] max_grid_size Maximum CTA occupancy
* @param[in] num_gpus Number of GPUs
* @param[in] context CudaContext for moderngpu to use
*
*/
////////////////////////////////////////////////////////////////////////////////
template <typename VertexId, typename Value, typename SizeT, bool INSTRUMENT>
void RunTests(
const Csr<VertexId, Value, SizeT> &graph_gpu,
const Csr<VertexId, Value, SizeT> &graph_cpu,
int max_grid_size,
int num_gpus,
mgpu::CudaContext& context)
{
printf("\nMINIMUM SPANNING TREE TEST - #NODES: %d #EDGES: %d\n",
graph_gpu.nodes, graph_gpu.edges);
// define the problem data structure for graph primitive
typedef MSTProblem<VertexId, SizeT, Value, true> Problem;
// INSTRUMENT specifies whether we want to keep such statistical data
// allocate MST enactor map
MSTEnactor<INSTRUMENT> mst_enactor(g_verbose);
// allocate problem on GPU create a pointer of the MSTProblem type
Problem * mst_problem = new Problem;
// host results spaces
VertexId * h_mst_output = new VertexId[graph_gpu.edges];
// copy data from CPU to GPU initialize data members in DataSlice
util::GRError(mst_problem->Init(g_stream_from_host, graph_gpu, num_gpus),
"Problem MST Initialization Failed", __FILE__, __LINE__);
// reset values in DataSlice
util::GRError(mst_problem->Reset(mst_enactor.GetFrontierType()),
"MST Problem Data Reset Failed", __FILE__, __LINE__);
// perform MST
GpuTimer gpu_timer; // record the kernel running time
gpu_timer.Start();
// launch MST Enactor
util::GRError(mst_enactor.template Enact<Problem>(
context, mst_problem, max_grid_size),
"MST Problem Enact Failed", __FILE__, __LINE__);
gpu_timer.Stop();
float elapsed_gpu = gpu_timer.ElapsedMillis();
printf("GPU - Computation Complete in %lf msec.\n", elapsed_gpu);
// copy results back to CPU from GPU using Extract
util::GRError(mst_problem->Extract(h_mst_output),
"MST Problem Data Extraction Failed", __FILE__, __LINE__);
// calculate GPU final number of selected edges
int num_selected_gpu = 0;
for (int iter = 0; iter < graph_gpu.edges; ++iter)
{
num_selected_gpu += h_mst_output[iter];
//printf("%d ", h_mst_output[iter]);
//printf("edge_%d: %d \n", iter, h_mst_output[iter]);
}
printf("\nGPU - Number of Edges in MST: %d\n", num_selected_gpu);
// calculate GPU total selected MST weights for validation
long long int total_weight_gpu = 0;
for (int iter = 0; iter < graph_gpu.edges; ++iter)
{
total_weight_gpu += h_mst_output[iter] * graph_gpu.edge_values[iter];
}
// correctness validation
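  // Minimum spanning trees are generally not unique (ties in edge weights can be
  // resolved differently), so correctness is judged by comparing the total MST
  // weight against the Boost Kruskal reference rather than the exact edge set.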
long long int total_weight_cpu =
SimpleReferenceMST(graph_cpu.edge_values, graph_cpu);
if (total_weight_cpu == total_weight_gpu)
{
if (graph_gpu.nodes <= 50)
{
printf("GPU Minimum Spanning Tree\n");
// print the edge pairs in the minimum spanning tree
DisplaySolution(graph_gpu, h_mst_output);
}
printf("\nCORRECT.\n");
}
else
{
printf("INCORRECT. \n"
"CPU Computed Total Weight = %lld\n"
"GPU Computed Total Weight = %lld\n",
total_weight_cpu, total_weight_gpu);
}
// clean up if necessary
if (mst_problem) delete mst_problem;
if (h_mst_output) delete [] h_mst_output;
hipDeviceSynchronize();
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph_gpu the CSR graph we process on
* @param[in] graph_cpu the CSR graph used for reference
* @param[in] args Reference to the command line arguments
* @param[in] context modern GPU CUDA context
*/
template <typename VertexId, typename Value, typename SizeT>
void RunTests(
const Csr<VertexId, Value, SizeT> &graph_gpu,
const Csr<VertexId, Value, SizeT> &graph_cpu,
CommandLineArgs &args,
mgpu::CudaContext& context)
{
bool instrumented = false; // do not collect instrumentation from kernels
int max_grid_size = 0; // maximum grid size (up to the enactor)
int num_gpus = 1; // number of GPUs for multi-gpu enactor to use
instrumented = args.CheckCmdLineFlag("instrumented");
g_quick = args.CheckCmdLineFlag("quick");
g_verbose = args.CheckCmdLineFlag("v");
if (instrumented)
{
RunTests<VertexId, Value, SizeT, true>(
graph_gpu, graph_cpu, max_grid_size, num_gpus, context);
}
else
{
RunTests<VertexId, Value, SizeT, false>(
graph_gpu, graph_cpu, max_grid_size, num_gpus, context);
}
}
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
CommandLineArgs args(argc, argv);
if ((argc < 3) || (args.CheckCmdLineFlag("help")))
{
Usage();
return 1;
}
int dev = 0;
args.GetCmdLineArgument("device", dev);
mgpu::ContextPtr context = mgpu::CreateCudaDevice(dev);
// parse graph-construction parameters
g_undirected = true;
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1)
{
Usage();
return 1;
}
//
// construct graph and perform algorithm
//
if (graph_type == "market")
{
// matrix-market coordinate-formatted graph file
typedef int VertexId; // use as the vertex identifier type
typedef int Value; // use as the value type
typedef int SizeT; // use as the graph size type
// default value for stream_from_host is false
if (graph_args < 1)
{
Usage();
return 1;
}
char * market_filename = (graph_args == 2) ? argv[2] : NULL;
// buildMarketGraph() reads a .mtx file into CSR data structure
// template argument = true because the graph has edge values
Csr<VertexId, Value, SizeT> csr_gpu(false);
if (graphio::BuildMarketGraph<true>(
market_filename,
csr_gpu,
g_undirected,
false) != 0) { return 1; }
// boost MST algorithm requires directed graph input
Csr<VertexId, Value, SizeT> csr_cpu(false);
if (graphio::BuildMarketGraph<true>(
market_filename,
csr_cpu,
g_undirected,
false) != 0) { return 1; }
// display graph
// csr_gpu.DisplayGraph();
// csr_cpu.DisplayGraph();
/***************************************************************
* To make sure two graphs have same weight value for each edge *
* we have to change ll_value = rand()%64 in market.cuh file to *
* some NON-RANDOM value if the original graph does NOT contain *
* weight per edge. Note it only support FULLY-CONNECTED graphs *
***************************************************************/
// run GPU tests
RunTests(csr_gpu, csr_cpu, args, *context);
}
else
{
// unknown graph type
fprintf(stderr, "Unspecified graph type\n");
return 1;
}
return 0;
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End: | 2797c90c923aac5ead56026e35c66afa2df3fcf4.cu | // -----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// -----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// -----------------------------------------------------------------------------
/**
* @file
* test_mst.cu
*
* @brief Simple test driver for computing Minimum Spanning Tree.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <utility>
#include <iostream>
#include <cstdlib>
#include <algorithm>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utilities
#include <gunrock/graphio/market.cuh>
// MST includes
#include <gunrock/app/mst/mst_enactor.cuh>
#include <gunrock/app/mst/mst_problem.cuh>
#include <gunrock/app/mst/mst_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// CPU Kruskal MST reference
#include <boost/config.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/kruskal_min_spanning_tree.hpp>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::mst;
////////////////////////////////////////////////////////////////////////////////
// defines, constants, globals
bool g_verbose;
bool g_undirected;
bool g_quick;
bool g_stream_from_host;
////////////////////////////////////////////////////////////////////////////////
// housekeeping routines
void Usage()
{
printf(
"\ntest_mst <graph type> <graph type args> [--device=<device_index>] "
"[--instrumented] [--quick] "
"[--v]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-format graph of directed/undirected\n"
" edges from stdin (or from the optionally-specified file)\n"
" --device=<device_index> Set GPU device for running the graph primitive\n"
" --instrumented If set then kernels keep track of queue-search_depth\n"
" and barrier duty (a relative indicator of load imbalance)\n"
" --quick If set will skip the CPU validation code\n"
" --v If set will enable DEBUG mode\n\n"
" --------------------------------------------------------------\n"
" To make sure two graphs have same weight value for each edge \n"
" we have to change ll_value = rand()%%64 in market.cuh file to \n"
" some NON-RANDOM value if the original graph does NOT contain \n"
" weight per edge. Note it only support FULLY-CONNECTED graphs \n"
" --------------------------------------------------------------\n");
}
/**
* @brief Displays the MST result
*
*/
////////////////////////////////////////////////////////////////////////////////
template<typename VertexId, typename Value, typename SizeT>
void DisplaySolution(const Csr<VertexId, Value, SizeT> &graph, int *mst_output)
{
fflush(stdout);
// find source vertex ids for display results
VertexId *source = new VertexId[graph.edges];
for (int i = 0; i < graph.nodes; ++i)
{
for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j)
{
source[j] = i;
}
}
// print source-destination pairs of minimum spanning tree edges
printf("src dst\n");
for (int i = 0; i < graph.edges; ++i)
{
if (mst_output[i] == 1)
{
printf("%d %d\n", source[i], graph.column_indices[i]);
}
}
// clean up if necessary
if (source) { delete [] source; }
}
/**
* @brief A simple CPU-based reference MST implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] edge_values weight value associated with per edge
* @param[in] graph reference to the CSR graph we process on
*
* \return long long int variable which indicates the total weight of the computed minimum spanning tree
*/
////////////////////////////////////////////////////////////////////////////////
template<typename VertexId, typename Value, typename SizeT>
long long int SimpleReferenceMST(
const Value *edge_values, const Csr<VertexId, Value, SizeT> &graph)
{
printf("REFERENCE TEST - #NODES: %d #EDGES: %d\n", graph.nodes, graph.edges);
// Kruskal minimum spanning tree preparations
using namespace boost;
typedef adjacency_list < vecS, vecS, undirectedS,
no_property, property < edge_weight_t, int > > Graph;
typedef graph_traits < Graph >::edge_descriptor Edge;
typedef graph_traits < Graph >::vertex_descriptor Vertex;
typedef std::pair<int, int> E;
E *edge_pairs = new E[graph.edges];
int idx = 0;
for (int i = 0; i < graph.nodes; ++i)
{
for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j)
{
edge_pairs[idx++] = std::make_pair(i, graph.column_indices[j]);
}
}
Graph g(edge_pairs, edge_pairs + graph.edges, edge_values, graph.nodes);
property_map < Graph, edge_weight_t >::type weight = get(edge_weight, g);
std::vector < Edge > spanning_tree;
CpuTimer cpu_timer; // record the kernel running time
cpu_timer.Start();
// compute reference using kruskal_min_spanning_tree algorithm
kruskal_minimum_spanning_tree(g, std::back_inserter(spanning_tree));
cpu_timer.Stop();
float elapsed_cpu = cpu_timer.ElapsedMillis();
// analyze reference results
SizeT num_selected_cpu = 0;
long long int total_weight_cpu = 0;
if (graph.nodes <= 50) printf("CPU Minimum Spanning Tree\n");
for (std::vector < Edge >::iterator ei = spanning_tree.begin();
ei != spanning_tree.end(); ++ei)
{
if (graph.nodes <= 50)
{
// print the edge pairs in the minimum spanning tree
printf("%ld %ld\n", source(*ei, g), target(*ei, g));
//printf(" with weight of %d\n", weight[*ei]);
}
++num_selected_cpu;
total_weight_cpu += weight[*ei];
}
// clean up if necessary
if (edge_pairs) { delete [] edge_pairs; }
printf("CPU - Computation Complete in %lf msec.\n", elapsed_cpu);
printf("CPU - Number of Edges in MST: %d\n", num_selected_cpu);
return total_weight_cpu;
}
/**
* @brief Run MST tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] graph_gpu the CSR graph we send to GPU to process
* @param[in] graph_cpu the CSR graph we process on CPU
* @param[in] max_grid_size Maximum CTA occupancy
* @param[in] num_gpus Number of GPUs
* @param[in] context CudaContext for moderngpu to use
*
*/
////////////////////////////////////////////////////////////////////////////////
template <typename VertexId, typename Value, typename SizeT, bool INSTRUMENT>
void RunTests(
const Csr<VertexId, Value, SizeT> &graph_gpu,
const Csr<VertexId, Value, SizeT> &graph_cpu,
int max_grid_size,
int num_gpus,
mgpu::CudaContext& context)
{
printf("\nMINIMUM SPANNING TREE TEST - #NODES: %d #EDGES: %d\n",
graph_gpu.nodes, graph_gpu.edges);
// define the problem data structure for graph primitive
typedef MSTProblem<VertexId, SizeT, Value, true> Problem;
// INSTRUMENT specifies whether we want to keep such statistical data
// allocate MST enactor map
MSTEnactor<INSTRUMENT> mst_enactor(g_verbose);
// allocate problem on GPU create a pointer of the MSTProblem type
Problem * mst_problem = new Problem;
// host results spaces
VertexId * h_mst_output = new VertexId[graph_gpu.edges];
// copy data from CPU to GPU initialize data members in DataSlice
util::GRError(mst_problem->Init(g_stream_from_host, graph_gpu, num_gpus),
"Problem MST Initialization Failed", __FILE__, __LINE__);
// reset values in DataSlice
util::GRError(mst_problem->Reset(mst_enactor.GetFrontierType()),
"MST Problem Data Reset Failed", __FILE__, __LINE__);
// perform MST
GpuTimer gpu_timer; // record the kernel running time
gpu_timer.Start();
// launch MST Enactor
util::GRError(mst_enactor.template Enact<Problem>(
context, mst_problem, max_grid_size),
"MST Problem Enact Failed", __FILE__, __LINE__);
gpu_timer.Stop();
float elapsed_gpu = gpu_timer.ElapsedMillis();
printf("GPU - Computation Complete in %lf msec.\n", elapsed_gpu);
// copy results back to CPU from GPU using Extract
util::GRError(mst_problem->Extract(h_mst_output),
"MST Problem Data Extraction Failed", __FILE__, __LINE__);
// calculate GPU final number of selected edges
int num_selected_gpu = 0;
for (int iter = 0; iter < graph_gpu.edges; ++iter)
{
num_selected_gpu += h_mst_output[iter];
//printf("%d ", h_mst_output[iter]);
//printf("edge_%d: %d \n", iter, h_mst_output[iter]);
}
printf("\nGPU - Number of Edges in MST: %d\n", num_selected_gpu);
// calculate GPU total selected MST weights for validation
long long int total_weight_gpu = 0;
for (int iter = 0; iter < graph_gpu.edges; ++iter)
{
total_weight_gpu += h_mst_output[iter] * graph_gpu.edge_values[iter];
}
// correctness validation
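  // Minimum spanning trees are generally not unique (ties in edge weights can be
  // resolved differently), so correctness is judged by comparing the total MST
  // weight against the Boost Kruskal reference rather than the exact edge set.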
long long int total_weight_cpu =
SimpleReferenceMST(graph_cpu.edge_values, graph_cpu);
if (total_weight_cpu == total_weight_gpu)
{
if (graph_gpu.nodes <= 50)
{
printf("GPU Minimum Spanning Tree\n");
// print the edge pairs in the minimum spanning tree
DisplaySolution(graph_gpu, h_mst_output);
}
printf("\nCORRECT.\n");
}
else
{
printf("INCORRECT. \n"
"CPU Computed Total Weight = %lld\n"
"GPU Computed Total Weight = %lld\n",
total_weight_cpu, total_weight_gpu);
}
// clean up if necessary
if (mst_problem) delete mst_problem;
if (h_mst_output) delete [] h_mst_output;
cudaDeviceSynchronize();
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph_gpu the CSR graph we process on
* @param[in] graph_cpu the CSR graph used for reference
* @param[in] args Reference to the command line arguments
* @param[in] context modern GPU CUDA context
*/
template <typename VertexId, typename Value, typename SizeT>
void RunTests(
const Csr<VertexId, Value, SizeT> &graph_gpu,
const Csr<VertexId, Value, SizeT> &graph_cpu,
CommandLineArgs &args,
mgpu::CudaContext& context)
{
bool instrumented = false; // do not collect instrumentation from kernels
int max_grid_size = 0; // maximum grid size (up to the enactor)
int num_gpus = 1; // number of GPUs for multi-gpu enactor to use
instrumented = args.CheckCmdLineFlag("instrumented");
g_quick = args.CheckCmdLineFlag("quick");
g_verbose = args.CheckCmdLineFlag("v");
if (instrumented)
{
RunTests<VertexId, Value, SizeT, true>(
graph_gpu, graph_cpu, max_grid_size, num_gpus, context);
}
else
{
RunTests<VertexId, Value, SizeT, false>(
graph_gpu, graph_cpu, max_grid_size, num_gpus, context);
}
}
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
CommandLineArgs args(argc, argv);
if ((argc < 3) || (args.CheckCmdLineFlag("help")))
{
Usage();
return 1;
}
int dev = 0;
args.GetCmdLineArgument("device", dev);
mgpu::ContextPtr context = mgpu::CreateCudaDevice(dev);
// parse graph-construction parameters
g_undirected = true;
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1)
{
Usage();
return 1;
}
//
// construct graph and perform algorithm
//
if (graph_type == "market")
{
// matrix-market coordinate-formatted graph file
typedef int VertexId; // use as the vertex identifier type
typedef int Value; // use as the value type
typedef int SizeT; // use as the graph size type
// default value for stream_from_host is false
if (graph_args < 1)
{
Usage();
return 1;
}
char * market_filename = (graph_args == 2) ? argv[2] : NULL;
// buildMarketGraph() reads a .mtx file into CSR data structure
// template argument = true because the graph has edge values
Csr<VertexId, Value, SizeT> csr_gpu(false);
if (graphio::BuildMarketGraph<true>(
market_filename,
csr_gpu,
g_undirected,
false) != 0) { return 1; }
// boost MST algorithm requires directed graph input
Csr<VertexId, Value, SizeT> csr_cpu(false);
if (graphio::BuildMarketGraph<true>(
market_filename,
csr_cpu,
g_undirected,
false) != 0) { return 1; }
// display graph
// csr_gpu.DisplayGraph();
// csr_cpu.DisplayGraph();
/***************************************************************
* To make sure two graphs have same weight value for each edge *
* we have to change ll_value = rand()%64 in market.cuh file to *
* some NON-RANDOM value if the original graph does NOT contain *
* weight per edge. Note it only support FULLY-CONNECTED graphs *
***************************************************************/
// run GPU tests
RunTests(csr_gpu, csr_cpu, args, *context);
}
else
{
// unknown graph type
fprintf(stderr, "Unspecified graph type\n");
return 1;
}
return 0;
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End: |
55a225e8b5930bb219d6f4e7ce21bebc6d5eaefa.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "imperviousness_change_double.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const unsigned char *dev_BIN1 = NULL;
hipMalloc(&dev_BIN1, XSIZE*YSIZE);
const unsigned char *dev_BIN2 = NULL;
hipMalloc(&dev_BIN2, XSIZE*YSIZE);
unsigned int WIDTH = 1;
unsigned int HEIGHT = 1;
double *dev_LTAKE_map = NULL;
hipMalloc(&dev_LTAKE_map, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(imperviousness_change_double, dim3(gridBlock), dim3(threadBlock), 0, 0, dev_BIN1, dev_BIN2, WIDTH, HEIGHT, dev_LTAKE_map);
hipDeviceSynchronize();
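// Benchmark protocol: the synchronized launch above validates the configuration,
// the next loop performs 10 untimed warm-up launches, and the loop after it times
// 1000 launches with steady_clock. Note that no device synchronization follows the
// timed loop, so the reported microseconds cover enqueueing the launches (plus any
// stalls once the launch queue fills) rather than full kernel execution.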
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(imperviousness_change_double, dim3(gridBlock), dim3(threadBlock), 0, 0, dev_BIN1, dev_BIN2, WIDTH, HEIGHT, dev_LTAKE_map);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(imperviousness_change_double, dim3(gridBlock), dim3(threadBlock), 0, 0, dev_BIN1, dev_BIN2, WIDTH, HEIGHT, dev_LTAKE_map);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 55a225e8b5930bb219d6f4e7ce21bebc6d5eaefa.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "imperviousness_change_double.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const unsigned char *dev_BIN1 = NULL;
cudaMalloc(&dev_BIN1, XSIZE*YSIZE);
const unsigned char *dev_BIN2 = NULL;
cudaMalloc(&dev_BIN2, XSIZE*YSIZE);
unsigned int WIDTH = 1;
unsigned int HEIGHT = 1;
double *dev_LTAKE_map = NULL;
cudaMalloc(&dev_LTAKE_map, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
imperviousness_change_double<<<gridBlock,threadBlock>>>(dev_BIN1,dev_BIN2,WIDTH,HEIGHT,dev_LTAKE_map);
cudaDeviceSynchronize();
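// Benchmark protocol: the synchronized launch above validates the configuration,
// the next loop performs 10 untimed warm-up launches, and the loop after it times
// 1000 launches with steady_clock. Note that no device synchronization follows the
// timed loop, so the reported microseconds cover enqueueing the launches (plus any
// stalls once the launch queue fills) rather than full kernel execution.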
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
imperviousness_change_double<<<gridBlock,threadBlock>>>(dev_BIN1,dev_BIN2,WIDTH,HEIGHT,dev_LTAKE_map);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
imperviousness_change_double<<<gridBlock,threadBlock>>>(dev_BIN1,dev_BIN2,WIDTH,HEIGHT,dev_LTAKE_map);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
4180c86d96b2d2814e5dfdc5498ed20fc6aecfa0.hip | // !!! This is a file automatically generated by hipify!!!
#include "discretize.h"
#include <hip/hip_runtime.h>
#include <map>
#include <vector>
#include <moab/ErrorCode>
#define CUDA_CHECK(value, label) { \
hipError_t c = (value); \
if (c != hipSuccess) { \
fprintf(stderr, \
"Error: '%s' at line %d in %s\n", \
hipGetErrorString(c),__LINE__,__FILE__); \
goto label; \
} }
std::vector<double> cuda_rayfire(moab::Instance MBI, moab::GeomTopoTool GTT,
moab::GeomQueryTool GQT, mesh_row row,
std::vector<moab::EntityHandle> vol_handles) {
std::vector<std::map<int, std::vector<double> > > row_totals;
std::vector<double> width;
moab::ErrorCode rval;
for (int i = 0; i < row.d3divs.size() - 1; i++) {
width.push_back(row.d3divs[i+1] - row.d3divs[i]);
}
row_totals.resize(width.size());
moab::EntityHandle root;
  rval = GTT.get_root(vol_handles[0], root);
// somehow we need to get the OBBTree into an array of doubles.
// We'll call it obbs[]
// Unfortunately, we'll need to hope that the tree is balanced well, because
// we'll only be able to find things by having them ordered; root, level 1,
// level 1, level 2 ...
// Any leaves that aren't there will be left as gaps in the array.
// Each OBB will be taken up by
// center[0], center[1], center[2], length[0], length[1], length[2],
// axes[0], ..., axis[8], radius
double obbs[size_of_OBBT] = {};
// The size of this one is tricky; we don't know how many we'll end up hitting
// so we give it enough space for each of the leaf entitites in the OBBT plus
// each mesh boundary, assuming only two triangles per OBB.
double distances[size_of_OBBT/16 + width.size()];
// Oh shoot, that's how many we need PER RAY! and it's useless if we don't
// fire a bunch of rays, as that's where the parallelization happens. There
// is REALLY no memory for this.
double *d_obbs, *d_distances, *d_width;
  CUDA_CHECK(hipMalloc((void**)&d_obbs, size_of_OBBT*sizeof(double)), cuda_error)
  CUDA_CHECK(hipMalloc((void**)&d_width, width.size()*sizeof(double)), cuda_error)
  CUDA_CHECK(hipMalloc((void**)&d_distances, (size_of_OBBT/16 + width.size())*sizeof(double)), cuda_error)
CUDA_CHECK(hipMemcpy(d_obbs, obbs, size_of_OBBT*sizeof(double), hipMemcpyHostToDevice), cuda_error)
CUDA_CHECK(hipMemcpy(d_width, &width[0], width.size()*sizeof(double), hipMemcpyHostToDevice), cuda_error)
  int N = row.num_rays;
// We're not using shared memory, so we're going to go with the maximum
// number of threads per block.
double blocksPerGrid = (N+1023)/1024;
double threadsPerBlock = 1024;
// There also needs to be a way to get the starting points of each ray;
// didn't get that far.
hipLaunchKernelGGL(( bogus_kernel), dim3(blocksPerGrid),dim3(threadsperblock), 0, 0, d_obbs,d_width,d_distances,N,row.grid);
// Don't actually do this. This would mangle the data.
CUDA_CHECK(hipMemcpy(distances, d_distances, (size_of_OBBT/16 + width.size())*sizeof(double), hipMemcpyDeviceToHost), cuda_error)
  std::vector<double> distances_vec(std::begin(distances), std::end(distances));
hipFree(d_distances);
hipFree(d_obbs);
hipFree(d_width);
  return distances_vec;
}
| 4180c86d96b2d2814e5dfdc5498ed20fc6aecfa0.cu | #include "discretize.h"
#include <cuda.h>
#include <map>
#include <vector>
#include <moab/ErrorCode>
#define CUDA_CHECK(value, label) { \
cudaError_t c = (value); \
if (c != cudaSuccess) { \
fprintf(stderr, \
"Error: '%s' at line %d in %s\n", \
cudaGetErrorString(c),__LINE__,__FILE__); \
goto label; \
} }
std::vector<double> cuda_rayfire(moab::Instance MBI, moab::GeomTopoTool GTT,
moab::GeomQueryTool GQT, mesh_row row,
std::vector<moab::EntityHandle> vol_handles) {
std::vector<std::map<int, std::vector<double> > > row_totals;
std::vector<double> width;
moab::ErrorCode rval;
for (int i = 0; i < row.d3divs.size() - 1; i++) {
width.push_back(row.d3divs[i+1] - row.d3divs[i]);
}
row_totals.resize(width.size());
moab::EntityHandle root;
  rval = GTT.get_root(vol_handles[0], root);
// somehow we need to get the OBBTree into an array of doubles.
// We'll call it obbs[]
// Unfortunately, we'll need to hope that the tree is balanced well, because
// we'll only be able to find things by having them ordered; root, level 1,
// level 1, level 2 ...
// Any leaves that aren't there will be left as gaps in the array.
// Each OBB will be taken up by
// center[0], center[1], center[2], length[0], length[1], length[2],
// axes[0], ..., axis[8], radius
double obbs[size_of_OBBT] = {};
// The size of this one is tricky; we don't know how many we'll end up hitting
// so we give it enough space for each of the leaf entitites in the OBBT plus
// each mesh boundary, assuming only two triangles per OBB.
double distances[size_of_OBBT/16 + width.size()];
// Oh shoot, that's how many we need PER RAY! and it's useless if we don't
// fire a bunch of rays, as that's where the parallelization happens. There
// is REALLY no memory for this.
double *d_obbs, *d_distances, *d_width;
  CUDA_CHECK(cudaMalloc((void**)&d_obbs, size_of_OBBT*sizeof(double)), cuda_error)
  CUDA_CHECK(cudaMalloc((void**)&d_width, width.size()*sizeof(double)), cuda_error)
  CUDA_CHECK(cudaMalloc((void**)&d_distances, (size_of_OBBT/16 + width.size())*sizeof(double)), cuda_error)
CUDA_CHECK(cudaMemcpy(d_obbs, obbs, size_of_OBBT*sizeof(double), cudaMemcpyHostToDevice), cuda_error)
CUDA_CHECK(cudaMemcpy(d_width, &width[0], width.size()*sizeof(double), cudaMemcpyHostToDevice), cuda_error)
  int N = row.num_rays;
// We're not using shared memory, so we're going to go with the maximum
// number of threads per block.
double blocksPerGrid = (N+1023)/1024;
double threadsPerBlock = 1024;
// There also needs to be a way to get the starting points of each ray;
// didn't get that far.
bogus_kernel<<<blocksPerGrid,threadsperblock>>>(d_obbs,d_width,d_distances,N,row.grid);
// Don't actually do this. This would mangle the data.
CUDA_CHECK(cudaMemcpy(distances, d_distances, (size_of_OBBT/16 + width.size())*sizeof(double), cudaMemcpyDeviceToHost), cuda_error)
  std::vector<double> distances_vec(std::begin(distances), std::end(distances));
cudaFree(d_distances);
cudaFree(d_obbs);
cudaFree(d_width);
  return distances_vec;
}
|
871e56f9abc00a1993f69d69574cbf5dd66dc27e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__ void shiftPoint(int totalPixels, double r, double g, double b, double* shiftedPointResults, double* weights, double *imageArray, double kernelBandwidth)
{
int pixel = blockIdx.x * blockDim.x + threadIdx.x;
if (pixel < totalPixels) {
double old_r = imageArray[pixel*3];
double old_g = imageArray[pixel*3 + 1];
double old_b = imageArray[pixel*3 + 2];
// Squared Euclidean distance in RGB space
double distance = 0.0;
distance += (r - old_r) * (r - old_r);
distance += (g - old_g) * (g - old_g);
distance += (b - old_b) * (b - old_b);
// Gaussian kernel
double weight = exp(-1 * (distance) / kernelBandwidth);
shiftedPointResults[pixel*3] = old_r * weight;
shiftedPointResults[pixel*3+1] = old_g * weight;
shiftedPointResults[pixel*3+2] = old_b * weight;
weights[pixel] = weight;
}
}
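// Block-wise parallel sum reduction: each thread accumulates a grid-strided
// partial sum into shared memory, the block collapses it with a tree reduction
// whose last warp is unrolled, and thread 0 writes one partial sum per block.
// blockIdx.y selects one of gridDim.y interleaved channels (e.g. the r/g/b
// components of shiftedPointResults); the hard-coded 256*2 offset assumes
// 256 threads per block.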
__global__ void reduceArray(double* in_data, double* out_data, unsigned int n) {
extern __shared__ double sdata[];
unsigned int tid = threadIdx.x;
unsigned int blockSize = blockDim.x;
unsigned int i = blockIdx.x*(256*2) + tid;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid] = 0;
while (i < n) { sdata[tid] += in_data[i*gridDim.y + blockIdx.y] + in_data[(i+blockSize)*gridDim.y + blockIdx.y]; i += gridSize; }
__syncthreads();
if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
if (tid < 32) {
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
if (tid == 0) out_data[(blockIdx.x)*gridDim.y+blockIdx.y] = sdata[0];
} | 871e56f9abc00a1993f69d69574cbf5dd66dc27e.cu | extern "C"
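// Mean-shift weighting step: each pixel's RGB value is weighted by a Gaussian
// kernel of its squared distance to the current estimate (r, g, b); the weighted
// colors and the weights are written out per pixel so a subsequent reduction can
// form the weighted average, i.e. the next mean-shift iterate.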
__global__ void shiftPoint(int totalPixels, double r, double g, double b, double* shiftedPointResults, double* weights, double *imageArray, double kernelBandwidth)
{
int pixel = blockIdx.x * blockDim.x + threadIdx.x;
if (pixel < totalPixels) {
double old_r = imageArray[pixel*3];
double old_g = imageArray[pixel*3 + 1];
double old_b = imageArray[pixel*3 + 2];
// Squared Euclidean distance in RGB space
double distance = 0.0;
distance += (r - old_r) * (r - old_r);
distance += (g - old_g) * (g - old_g);
distance += (b - old_b) * (b - old_b);
// Gaussian kernel
double weight = exp(-1 * (distance) / kernelBandwidth);
shiftedPointResults[pixel*3] = old_r * weight;
shiftedPointResults[pixel*3+1] = old_g * weight;
shiftedPointResults[pixel*3+2] = old_b * weight;
weights[pixel] = weight;
}
}
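// Block-wise parallel sum reduction: each thread accumulates a grid-strided
// partial sum into shared memory, the block collapses it with a tree reduction
// whose last warp is unrolled, and thread 0 writes one partial sum per block.
// blockIdx.y selects one of gridDim.y interleaved channels (e.g. the r/g/b
// components of shiftedPointResults); the hard-coded 256*2 offset assumes
// 256 threads per block.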
__global__ void reduceArray(double* in_data, double* out_data, unsigned int n) {
extern __shared__ double sdata[];
unsigned int tid = threadIdx.x;
unsigned int blockSize = blockDim.x;
unsigned int i = blockIdx.x*(256*2) + tid;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid] = 0;
while (i < n) { sdata[tid] += in_data[i*gridDim.y + blockIdx.y] + in_data[(i+blockSize)*gridDim.y + blockIdx.y]; i += gridSize; }
__syncthreads();
if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
if (tid < 32) {
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
if (tid == 0) out_data[(blockIdx.x)*gridDim.y+blockIdx.y] = sdata[0];
} |
a9c40dc7c117fa31ee7cea230330911efdd0f7d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include<sys/time.h>
#include<math.h>
#define N 8192
#define nth 1024
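// Tiled transpose: each block stages a dimx x dimy tile of A in shared memory
// (coalesced loads along x), transposes the tile in shared memory, and writes it
// to the mirrored tile position of the output matrix B.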
__global__ void fast_transpose(size_t* A, size_t* B){
__shared__ size_t Ablock[nth];
__shared__ size_t Bblock[nth];
size_t dimx=blockDim.x;
size_t dimy=blockDim.y;
//dimx=linear dimension in x of a submatrix block
size_t th=threadIdx.x+threadIdx.y*dimx;
size_t thx=threadIdx.x;
size_t thy=threadIdx.y;
size_t starty=blockIdx.y*N*dimy;
size_t startx=blockIdx.x*dimx;
size_t start= startx+starty;
//Ablock is different for every block, so I can index it with th
Ablock[th]= A[start+thx+(thy)*(N)];
//creation of A completed for each block
__syncthreads();
//transpose into B block
Bblock[dimy*thx + thy] = Ablock[th];
__syncthreads();
//put Bblock in B
start=blockIdx.y*dimy+dimx*N*blockIdx.x; //the x block index of the original matrix becomes y index of transpose, so skip N
B[ start+thy+(thx)*(N) ]=Bblock[dimy*thx + thy];
}
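// Naive baseline transpose: block j copies line j of A into the transposed
// position in B directly from global memory, with threads striding over i in
// B[j+i*N] = A[i+j*N].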
__global__ void transpose(size_t* A, size_t *B){
size_t j=blockIdx.x;
size_t i=threadIdx.x;
while(i<N){
B[j+i*N]=A[i+j*N];
i+=blockDim.x;
}
}
/////////////////////C utilites//////////////////////////////
int transposed(size_t *A, size_t* At){
size_t i,j;
for(i=0;i<N;i++){
for(j=0;j<N;j++){
if(A[i+j*N]!=At[j+i*N]){return 0;}
}
}
return 1;
}
double seconds()
{
struct timeval tmp;
double sec;
gettimeofday( &tmp, (struct timezone *)0 );
sec = tmp.tv_sec + ((double)tmp.tv_usec)/1000000.0;
return sec;
}
////////////////////////////////////main
int main(){
size_t elements=N*N;
size_t space=N*N*sizeof(size_t);
size_t*A=(size_t*)malloc(space);
size_t*dev_A;
size_t*B=(size_t*)malloc(space);
size_t*dev_B;
size_t i;
for(i=0;i<elements;i++){
A[i]=i%N;
}
hipMalloc( (void**)&dev_A, space );
hipMalloc( (void**)&dev_B, space );
hipMemcpy( dev_A, A, space, hipMemcpyHostToDevice );
double tstart=seconds();
hipLaunchKernelGGL(( transpose), dim3(N), dim3(nth) , 0, 0, dev_A, dev_B);
hipDeviceSynchronize();
double duration=seconds()-tstart;
printf("transp time: %lf\n",duration);
hipMemcpy( B, dev_B, space, hipMemcpyDeviceToHost );
printf("correct? %d\n\n",transposed(A,B));
size_t block_side= (size_t)sqrt(nth);
dim3 grid,block;
if(block_side*block_side==nth){
grid.x=grid.y=N/block_side; //number of horizontal blocks = number of vertical blocks
block.x=block.y=block_side; //block linear length
}
else{
grid.x=N/32; //ideally, given nth we should find integers (a,b) such that nth=a*b with a and b as close to each other as possible
grid.y=N/16; //a>b is preferred, so that we read more often along x (contiguous in memory)
block.x=32;
block.y=16;
}
tstart=seconds();
hipLaunchKernelGGL(( fast_transpose), dim3(grid), dim3(block) , 0, 0, dev_A, dev_B);
hipDeviceSynchronize();
duration=seconds()-tstart;
printf("fast times: %lf\n",duration);
hipMemcpy( B, dev_B, space, hipMemcpyDeviceToHost );
/* for(i=0;i<elements;i++){
if(i%N==0 && i!=0)printf("\n");
printf("%d ", A[i]);
}
printf("\n");
for(i=0;i<elements;i++){
if(i%N==0 && i!=0)printf("\n");
printf("%d ", B[i]);
}
printf("\n"); */
printf("correct? %d\n\n",transposed(A,B));
free(A);free(B);
hipFree(dev_A);hipFree(dev_B);
}
| a9c40dc7c117fa31ee7cea230330911efdd0f7d6.cu | #include <stdio.h>
#include<sys/time.h>
#include<math.h>
#define N 8192
#define nth 1024
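// Tiled transpose: each block stages a dimx x dimy tile of A in shared memory
// (coalesced loads along x), transposes the tile in shared memory, and writes it
// to the mirrored tile position of the output matrix B.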
__global__ void fast_transpose(size_t* A, size_t* B){
__shared__ size_t Ablock[nth];
__shared__ size_t Bblock[nth];
size_t dimx=blockDim.x;
size_t dimy=blockDim.y;
//dimx=linear dimension in x of a submatrix block
size_t th=threadIdx.x+threadIdx.y*dimx;
size_t thx=threadIdx.x;
size_t thy=threadIdx.y;
size_t starty=blockIdx.y*N*dimy;
size_t startx=blockIdx.x*dimx;
size_t start= startx+starty;
//Ablock is different for every block, so I can index it with th
Ablock[th]= A[start+thx+(thy)*(N)];
//creation of A completed for each block
__syncthreads();
//transpose into B block
Bblock[dimy*thx + thy] = Ablock[th];
__syncthreads();
//put Bblock in B
start=blockIdx.y*dimy+dimx*N*blockIdx.x; //the x block index of the original matrix becomes y index of transpose, so skip N
B[ start+thy+(thx)*(N) ]=Bblock[dimy*thx + thy];
}
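// Naive baseline transpose: block j copies line j of A into the transposed
// position in B directly from global memory, with threads striding over i in
// B[j+i*N] = A[i+j*N].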
__global__ void transpose(size_t* A, size_t *B){
size_t j=blockIdx.x;
size_t i=threadIdx.x;
while(i<N){
B[j+i*N]=A[i+j*N];
i+=blockDim.x;
}
}
/////////////////////C utilites//////////////////////////////
int transposed(size_t *A, size_t* At){
size_t i,j;
for(i=0;i<N;i++){
for(j=0;j<N;j++){
if(A[i+j*N]!=At[j+i*N]){return 0;}
}
}
return 1;
}
double seconds()
{
struct timeval tmp;
double sec;
gettimeofday( &tmp, (struct timezone *)0 );
sec = tmp.tv_sec + ((double)tmp.tv_usec)/1000000.0;
return sec;
}
////////////////////////////////////main
int main(){
size_t elements=N*N;
size_t space=N*N*sizeof(size_t);
size_t*A=(size_t*)malloc(space);
size_t*dev_A;
size_t*B=(size_t*)malloc(space);
size_t*dev_B;
size_t i;
for(i=0;i<elements;i++){
A[i]=i%N;
}
cudaMalloc( (void**)&dev_A, space );
cudaMalloc( (void**)&dev_B, space );
cudaMemcpy( dev_A, A, space, cudaMemcpyHostToDevice );
double tstart=seconds();
transpose<<< N, nth >>>(dev_A, dev_B);
cudaDeviceSynchronize();
double duration=seconds()-tstart;
printf("transp time: %lf\n",duration);
cudaMemcpy( B, dev_B, space, cudaMemcpyDeviceToHost );
printf("correct? %d\n\n",transposed(A,B));
size_t block_side= (size_t)sqrt(nth);
dim3 grid,block;
if(block_side*block_side==nth){
grid.x=grid.y=N/block_side; //number of horizontal blocks = number of vertical blocks
block.x=block.y=block_side; //block linear length
}
else{
grid.x=N/32; //ideally, given nth we should find integers (a,b) such that nth=a*b with a and b as close to each other as possible
grid.y=N/16; //a>b is preferred, so that we read more often along x (contiguous in memory)
block.x=32;
block.y=16;
}
tstart=seconds();
fast_transpose<<< grid, block >>>(dev_A, dev_B);
cudaDeviceSynchronize();
duration=seconds()-tstart;
printf("fast times: %lf\n",duration);
cudaMemcpy( B, dev_B, space, cudaMemcpyDeviceToHost );
/* for(i=0;i<elements;i++){
if(i%N==0 && i!=0)printf("\n");
printf("%d ", A[i]);
}
printf("\n");
for(i=0;i<elements;i++){
if(i%N==0 && i!=0)printf("\n");
printf("%d ", B[i]);
}
printf("\n"); */
printf("correct? %d\n\n",transposed(A,B));
free(A);free(B);
cudaFree(dev_A);cudaFree(dev_B);
}
|
4dd9c9146ee1d91ca27c8cb11a28956baddd0e47.hip | // !!! This is a file automatically generated by hipify!!!
/** \file RayTraceCUDA_kernel.cu
* \author Tomasz Jakubczyk
* \brief RayTrace CUDA kernel function & helpers
*/
#define WIN32
#include<stdlib.h>
#include <math.h>
#include <float.h>
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <hip/hip_vector_types.h>
#include "helper_math.h"
#include "math_constants.h"
#include "rcstruct.cuh"
#include "HandlesStructures.cuh"
extern "C"
{
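/** \brief Refraction of direction vector v at a surface with normal n (Snell's law).
 *  m2 is the relative refractive index; p selects whether sin(al1) is divided by m2
 *  (p==1) or multiplied by it, i.e. which way the ray crosses the interface. */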
__device__
float3 findAlpha( float3 n, float3 v, float p, float m2 )
{
float al1=acos(dot(n,v));
float al2;
if(p==1)
{
al2=asin(sin(al1)/m2);
}
else
{
al2=asin(m2*sin(al1));
}
float bet=al1-al2;
float3 S=cross(v,n);
float3 V2;
if(length(S)==0.0f)
{
V2=v;
}
else
{
float W=S.x*S.x+S.y*S.y+S.z*S.z;
float2 B=make_float2(cos(bet),cos(al2));
float Wx=(B.x*n.y-B.y*v.y)*S.z+(B.y*v.z-B.x*n.z)*S.y;
float Wy=(B.y*v.x-B.x*n.x)*S.z+(B.x*n.z-B.y*v.z)*S.x;
float Wz=(B.y*v.y-B.x*n.y)*S.x+(B.x*n.x-B.y*v.x)*S.y;
V2=make_float3(Wx/W,Wy/W,Wz/W);
}
return V2;
}
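/** \brief Intersection of the ray r + t*V with a sphere of radius R centered at the
 *  origin: solves the quadratic A*t^2 + B*t + C = 0 and returns both hit points,
 *  or NaN vectors when the discriminant is negative (no intersection). */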
__device__
rcstruct SphereCross( float3 r, float3 V, float R )
{
float A=V.x*V.x+V.y*V.y+V.z*V.z;
float B=2.0f*dot(r,V);
float C=r.x*r.x+r.y*r.y+r.z*r.z-R*R;
float D=B*B-4.0f*A*C;
rcstruct rc;
if(D<0.0f)
{
rc.a=make_float3(CUDART_NAN_F,CUDART_NAN_F,CUDART_NAN_F);
rc.b=make_float3(CUDART_NAN_F,CUDART_NAN_F,CUDART_NAN_F);
}
else
{
float t1=(-B+sqrt(D))/2.0f/A;
float t2=(-B-sqrt(D))/2.0f/A;
rc.a=r+V*t1;
rc.b=r+V*t2;
}
return rc;
}
__global__
/** \brief RayTrace CUDA kernel function.
*
* \param Br float*
* \param Vb float*
* \param VH float*
* \param Vb_length int
* \param VH_length int
* \param S HandlesStructures structure contains the parameters of the lens
* \param IC float* correction matrix
* \param PK float4* pixel position matrix
* \return void
*
*/
void RayTraceD(float* Br, float* Vb, float* VH, int Vb_length, int VH_length, HandlesStructures S, float* IC, float* PX)
{
// unique block index inside a 3D block grid
const unsigned int blockId = blockIdx.x //1D
+ blockIdx.y * gridDim.x //2D
+ gridDim.x * gridDim.y * blockIdx.z; //3D
uint index = __mul24(blockId,blockDim.x) + threadIdx.x;
//float3 P[11];
if(index==0)
{
//P[0]=make_float3(-1,-1,-1);//error1
}
uint indexi = index/Vb_length;
//if (indexi >= VH_length)
if (indexi >= Vb_length)
{
//P[index*7]=make_float3(-100,-100,-100);//error1
return;//empty kernel
}
uint indexj = index%Vb_length;
if (indexj >= Vb_length)
{
//P[index*7]=make_float3(-200,-200,-200);//error1
return;//critical error
}
float delta1=(float)indexi/(float)Vb_length;
if(delta1<0 || delta1>1)
return;
float delta2=(float)indexj/(float)Vb_length;
if(delta2<0 || delta2>1)
return;
//float theta=2.0f*3.1415f*delta1;
//float phi=2.0f*asin(sqrt(delta2));
float theta=0.5f*3.1415f*delta1-3.1415f/4.0f;
float phi=0.5f*asin(sqrt(delta2))-3.1415f/8.0f;
float3 P2=make_float3(Br[indexj],Vb[indexj],VH[indexi]);/**< point on the surface of the first diaphragm */
//uint p=0;
float3 nan3=make_float3(CUDART_NAN_F,CUDART_NAN_F,CUDART_NAN_F);
//Calculation of the position of the sphere's center
S.Cs1=S.l1-S.R1+S.g;
S.Cs2=S.Cs1+S.ll+2.0f*S.R2;
float3 P1 = S.Pk;//droplet coordinates
//float3 v = normalize(P2 - P1);//direction vector of the line
float3 v = make_float3( cos(theta)*cos(phi), cos(theta)*sin(phi), sin(theta) );
        /// \todo derive the direction vector from theta and phi
//looking for the point of intersection of the line and lenses
//float t = (S.l1 - P2.x)/v.x;/// \todo
float t = (S.l1 - P1.x)/v.x;
//float3 P3 = P2 + t*v;//Point in the plane parallel to the flat surface of the lens
float3 P3 = P1 + t*v;
/// \todo P3.x=l1-P1.x
        if (length(make_float2(P3.y,P3.z)) > (S.efD/2))//check whether the point lies inside the lens aperture
{
//recalculate coordinates
//float Kp = length(make_float2(P3.y,P3.z))/(S.efD/2);
//P3.y/=Kp;
//P3.z/=Kp;
//v = normalize(P3 - P1);//direction vector of the line
return;
}
//normal vector to the surface
float3 n=make_float3(1.0f,0.0f,0.0f);
float3 v3 = findAlpha( n, v,1,S.m2 );
//For intensity calculation
float P8 = acos(dot(n,v));
rcstruct rc = SphereCross( make_float3( P3.x - S.Cs1, P3.y, P3.z ), v3,S.R1 );
if(isnan(rc.a.x))
{
/*p=0;
P[index*7+p++]=P1;
P[index*7+p++]=P2;
P[index*7+p++]=P3;
P[index*7+p++]=nan3;
P[index*7+p++]=nan3;
P[index*7+p++]=nan3;
P[index*7+p++]=nan3;*/
//P[index*7]=make_float3(100,100,100);//error1
return;
}
float3 ns = normalize(rc.a);
float3 v4 = findAlpha( ns, v3,2,S.m2 );
//For intensity calculation
float P9 = acos(dot(ns, v3));
float3 P4 = make_float3( rc.a.x + S.Cs1, rc.a.y, rc.a.z );
if(length(make_float2(rc.a.y,rc.a.z)) > S.D/2)
{
/*p=0;
P[index*7+p++]=P1;
P[index*7+p++]=P2;
P[index*7+p++]=P3;
P[index*7+p++]=P4;
P[index*7+p++]=nan3;
P[index*7+p++]=nan3;
P[index*7+p++]=nan3;*/
//P[index*7]=make_float3(200,200,200);//error2
return;
}
rcstruct rc1 = SphereCross( make_float3(P4.x-S.Cs2,P4.y,P4.z), v4,S.R2 );
if(isnan( rc1.a.x ))
{
/*p=0;
P[index*7+p++]=P1;
P[index*7+p++]=P2;
P[index*7+p++]=P3;
P[index*7+p++]=P4;
P[index*7+p++]=nan3;
P[index*7+p++]=nan3;
P[index*7+p++]=nan3;*/
//P[index*7]=make_float3(300,300,300);//error3
return;
}
float3 P5 = rc1.b;
P5.x = P5.x + S.Cs2;
if(length(make_float2(rc1.b.y,rc1.b.z)) > S.D/2)
{
/*p=0;
P[index*7+p++]=P1;
P[index*7+p++]=P2;
P[index*7+p++]=P3;
P[index*7+p++]=P5;
P[index*7+p++]=nan3;
P[index*7+p++]=nan3;
P[index*7+p++]=nan3;*/
//P[index*7]=make_float3(400,400,400);//error4
return;
}
ns = normalize(rc1.b);
float3 v5 = findAlpha( -ns, v4,1,S.m2 );
//For intensity calculation
float P10 = acos(dot(-ns, v4));
float X = S.l1 + 2*S.g + S.ll;
t = ( X - P5.x ) / v5.x;
float3 P6 = P5 + v5*t;
float3 v6 = findAlpha( n, v5,2,S.m2 );
//For intensity calculation
float P11 = acos(dot(n, v5));
t = (S.lCCD - P6.x ) / v6.x;
float3 P7 = P6 + v6*t;
/*p=0;
P[index*7+p++]=P1;
P[index*7+p++]=P2;
P[index*7+p++]=P3;
P[index*7+p++]=P4;
P[index*7+p++]=P5;
P[index*7+p++]=P6;
P[index*7+p++]=P7;*/
/*if(IM==NULL || IM==0)
{
//P[index*7]=make_float3(500,500,500);//error5
return;//no need to calculate image
}*/
/*float dist=length(P1-P2)+length(P2-P3)+
length(P3-P4)+length(P4-P5)+
length(P5-P6)+length(P6-P7);
float3 vR = normalize(P7-P6);
float alp = acos(dot(make_float3(1,0,0),vR));*/
float W = S.shX + ( S.CCDW/2.0f +P7.y)/S.PixSize - 1.0f;
if(round(W)>=640 || round(W)<0)
return;
float Hi = S.shY + ( S.CCDH/2.0f +P7.z)/S.PixSize - 1.0f;
if(round(Hi)>=480 || round(Hi)<0)
return;
//float value=cos(alp)/(dist*dist);
        //Record the ray positions and the number of rays that fall into this cell
float value=1.0f;
float* val0;
val0=(float*)PX+(unsigned int)round(Hi)*4+(unsigned int)round(W)*480*4;
        //atomicAdd(val0, P2.x);/// \todo return theta and phi + corrections in Matlab
atomicAdd(val0, theta);
val0=(float*)PX+1+(unsigned int)round(Hi)*4+(unsigned int)round(W)*480*4;
//atomicAdd(val0, P2.y);
atomicAdd(val0, phi);
//val0=(float*)PX+2+(unsigned int)round(Hi)*4+(unsigned int)round(W)*480*4;
//atomicAdd(val0, P2.z);
val0=(float*)PX+3+(unsigned int)round(Hi)*4+(unsigned int)round(W)*480*4;
atomicAdd(val0, value);//+1
        //Calculation of the energy loss caused by reflection at the lens surfaces and by the increasing travel distance
// value=0.01f*(length(P1-P2) + length(P2-P3));
// value*=value;//fast square
float Ka1 = cos(P8)/value;
//KA1[index]=Ka1;
// value=0.01f*length(P3-P4);
// value*=value;
// float Ka2 = Ka1*cos(P9)/value;
//KA1[index]=Ka2;
// value=0.01f*length(P4-P5);
// value*=value;
// float Ka3 = Ka2*cos(P10)/value;
//KA1[index]=Ka3;
// value=cos(P11);//in calculation intensive code calculating same cosine twice isn't wise
// value*=value;
// float Ka4 = Ka3*value;
// value=0.01f*(length(P5-P6) + length(P6-P7));
// value*=value;
// Ka4/=value;
//KA1[index]=Ka4;
// value=1.0f/Ka4;
value=1.0f;
//KA1[index]=value;
val0=IC+(unsigned int)round(Hi)+(unsigned int)round(W)*480;
atomicAdd(val0, value);
//val0=&KA1[indexi];
//value=1.0f;
//atomicAdd(val0, value);
//float* val0=IM+(unsigned int)round(Hi)+(unsigned int)round(W)*480;
//atomicAdd(val0, value);
}
}//extern "C"
| 4dd9c9146ee1d91ca27c8cb11a28956baddd0e47.cu | /** \file RayTraceCUDA_kernel.cu
* \author Tomasz Jakubczyk
* \brief RayTrace CUDA kernel function & helpers
*/
#define WIN32
#include<stdlib.h>
#include <math.h>
#include <float.h>
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <vector_types.h>
#include "helper_math.h"
#include "math_constants.h"
#include "rcstruct.cuh"
#include "HandlesStructures.cuh"
extern "C"
{
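/** \brief Refraction of direction vector v at a surface with normal n (Snell's law).
 *  m2 is the relative refractive index; p selects whether sin(al1) is divided by m2
 *  (p==1) or multiplied by it, i.e. which way the ray crosses the interface. */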
__device__
float3 findAlpha( float3 n, float3 v, float p, float m2 )
{
float al1=acos(dot(n,v));
float al2;
if(p==1)
{
al2=asin(sin(al1)/m2);
}
else
{
al2=asin(m2*sin(al1));
}
float bet=al1-al2;
float3 S=cross(v,n);
float3 V2;
if(length(S)==0.0f)
{
V2=v;
}
else
{
float W=S.x*S.x+S.y*S.y+S.z*S.z;
float2 B=make_float2(cos(bet),cos(al2));
float Wx=(B.x*n.y-B.y*v.y)*S.z+(B.y*v.z-B.x*n.z)*S.y;
float Wy=(B.y*v.x-B.x*n.x)*S.z+(B.x*n.z-B.y*v.z)*S.x;
float Wz=(B.y*v.y-B.x*n.y)*S.x+(B.x*n.x-B.y*v.x)*S.y;
V2=make_float3(Wx/W,Wy/W,Wz/W);
}
return V2;
}
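/** \brief Intersection of the ray r + t*V with a sphere of radius R centered at the
 *  origin: solves the quadratic A*t^2 + B*t + C = 0 and returns both hit points,
 *  or NaN vectors when the discriminant is negative (no intersection). */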
__device__
rcstruct SphereCross( float3 r, float3 V, float R )
{
float A=V.x*V.x+V.y*V.y+V.z*V.z;
float B=2.0f*dot(r,V);
float C=r.x*r.x+r.y*r.y+r.z*r.z-R*R;
float D=B*B-4.0f*A*C;
rcstruct rc;
if(D<0.0f)
{
rc.a=make_float3(CUDART_NAN_F,CUDART_NAN_F,CUDART_NAN_F);
rc.b=make_float3(CUDART_NAN_F,CUDART_NAN_F,CUDART_NAN_F);
}
else
{
float t1=(-B+sqrt(D))/2.0f/A;
float t2=(-B-sqrt(D))/2.0f/A;
rc.a=r+V*t1;
rc.b=r+V*t2;
}
return rc;
}
__global__
/** \brief RayTrace CUDA kernel function.
*
* \param Br float*
* \param Vb float*
* \param VH float*
* \param Vb_length int
* \param VH_length int
* \param S HandlesStructures structure contains the parameters of the lens
* \param IC float* correction matrix
* \param PK float4* pixel position matrix
* \return void
*
*/
void RayTraceD(float* Br, float* Vb, float* VH, int Vb_length, int VH_length, HandlesStructures S, float* IC, float* PX)
{
// unique block index inside a 3D block grid
const unsigned int blockId = blockIdx.x //1D
+ blockIdx.y * gridDim.x //2D
+ gridDim.x * gridDim.y * blockIdx.z; //3D
uint index = __mul24(blockId,blockDim.x) + threadIdx.x;
//float3 P[11];
if(index==0)
{
//P[0]=make_float3(-1,-1,-1);//error1
}
uint indexi = index/Vb_length;
//if (indexi >= VH_length)
if (indexi >= Vb_length)
{
//P[index*7]=make_float3(-100,-100,-100);//error1
return;//empty kernel
}
uint indexj = index%Vb_length;
if (indexj >= Vb_length)
{
//P[index*7]=make_float3(-200,-200,-200);//error1
return;//critical error
}
float delta1=(float)indexi/(float)Vb_length;
if(delta1<0 || delta1>1)
return;
float delta2=(float)indexj/(float)Vb_length;
if(delta2<0 || delta2>1)
return;
//float theta=2.0f*3.1415f*delta1;
//float phi=2.0f*asin(sqrt(delta2));
float theta=0.5f*3.1415f*delta1-3.1415f/4.0f;
float phi=0.5f*asin(sqrt(delta2))-3.1415f/8.0f;
float3 P2=make_float3(Br[indexj],Vb[indexj],VH[indexi]);/**< point on the surface of the first diaphragm */
//uint p=0;
float3 nan3=make_float3(CUDART_NAN_F,CUDART_NAN_F,CUDART_NAN_F);
//Calculation of the position of the sphere's center
S.Cs1=S.l1-S.R1+S.g;
S.Cs2=S.Cs1+S.ll+2.0f*S.R2;
float3 P1 = S.Pk;//droplet coordinates
//float3 v = normalize(P2 - P1);//direction vector of the line
float3 v = make_float3( cos(theta)*cos(phi), cos(theta)*sin(phi), sin(theta) );
        /// \todo derive the direction vector from theta and phi
//looking for the point of intersection of the line and lenses
//float t = (S.l1 - P2.x)/v.x;/// \todo
float t = (S.l1 - P1.x)/v.x;
//float3 P3 = P2 + t*v;//Point in the plane parallel to the flat surface of the lens
float3 P3 = P1 + t*v;
/// \todo P3.x=l1-P1.x
        if (length(make_float2(P3.y,P3.z)) > (S.efD/2))//check whether the point lies inside the lens aperture
{
//recalculate coordinates
//float Kp = length(make_float2(P3.y,P3.z))/(S.efD/2);
//P3.y/=Kp;
//P3.z/=Kp;
//v = normalize(P3 - P1);//direction vector of the line
return;
}
//normal vector to the surface
float3 n=make_float3(1.0f,0.0f,0.0f);
float3 v3 = findAlpha( n, v,1,S.m2 );
//For intensity calculation
float P8 = acos(dot(n,v));
rcstruct rc = SphereCross( make_float3( P3.x - S.Cs1, P3.y, P3.z ), v3,S.R1 );
if(isnan(rc.a.x))
{
/*p=0;
P[index*7+p++]=P1;
P[index*7+p++]=P2;
P[index*7+p++]=P3;
P[index*7+p++]=nan3;
P[index*7+p++]=nan3;
P[index*7+p++]=nan3;
P[index*7+p++]=nan3;*/
//P[index*7]=make_float3(100,100,100);//error1
return;
}
float3 ns = normalize(rc.a);
float3 v4 = findAlpha( ns, v3,2,S.m2 );
//For intensity calculation
float P9 = acos(dot(ns, v3));
float3 P4 = make_float3( rc.a.x + S.Cs1, rc.a.y, rc.a.z );
if(length(make_float2(rc.a.y,rc.a.z)) > S.D/2)
{
/*p=0;
P[index*7+p++]=P1;
P[index*7+p++]=P2;
P[index*7+p++]=P3;
P[index*7+p++]=P4;
P[index*7+p++]=nan3;
P[index*7+p++]=nan3;
P[index*7+p++]=nan3;*/
//P[index*7]=make_float3(200,200,200);//error2
return;
}
rcstruct rc1 = SphereCross( make_float3(P4.x-S.Cs2,P4.y,P4.z), v4,S.R2 );
if(isnan( rc1.a.x ))
{
/*p=0;
P[index*7+p++]=P1;
P[index*7+p++]=P2;
P[index*7+p++]=P3;
P[index*7+p++]=P4;
P[index*7+p++]=nan3;
P[index*7+p++]=nan3;
P[index*7+p++]=nan3;*/
//P[index*7]=make_float3(300,300,300);//error3
return;
}
float3 P5 = rc1.b;
P5.x = P5.x + S.Cs2;
if(length(make_float2(rc1.b.y,rc1.b.z)) > S.D/2)
{
/*p=0;
P[index*7+p++]=P1;
P[index*7+p++]=P2;
P[index*7+p++]=P3;
P[index*7+p++]=P5;
P[index*7+p++]=nan3;
P[index*7+p++]=nan3;
P[index*7+p++]=nan3;*/
//P[index*7]=make_float3(400,400,400);//error4
return;
}
ns = normalize(rc1.b);
float3 v5 = findAlpha( -ns, v4,1,S.m2 );
//For intensity calculation
float P10 = acos(dot(-ns, v4));
float X = S.l1 + 2*S.g + S.ll;
t = ( X - P5.x ) / v5.x;
float3 P6 = P5 + v5*t;
float3 v6 = findAlpha( n, v5,2,S.m2 );
//For intensity calculation
float P11 = acos(dot(n, v5));
t = (S.lCCD - P6.x ) / v6.x;
float3 P7 = P6 + v6*t;
/*p=0;
P[index*7+p++]=P1;
P[index*7+p++]=P2;
P[index*7+p++]=P3;
P[index*7+p++]=P4;
P[index*7+p++]=P5;
P[index*7+p++]=P6;
P[index*7+p++]=P7;*/
/*if(IM==NULL || IM==0)
{
//P[index*7]=make_float3(500,500,500);//error5
return;//no need to calculate image
}*/
/*float dist=length(P1-P2)+length(P2-P3)+
length(P3-P4)+length(P4-P5)+
length(P5-P6)+length(P6-P7);
float3 vR = normalize(P7-P6);
float alp = acos(dot(make_float3(1,0,0),vR));*/
float W = S.shX + ( S.CCDW/2.0f +P7.y)/S.PixSize - 1.0f;
if(round(W)>=640 || round(W)<0)
return;
float Hi = S.shY + ( S.CCDH/2.0f +P7.z)/S.PixSize - 1.0f;
if(round(Hi)>=480 || round(Hi)<0)
return;
//float value=cos(alp)/(dist*dist);
        //Record the ray positions and the number of rays that fall into this cell
float value=1.0f;
float* val0;
val0=(float*)PX+(unsigned int)round(Hi)*4+(unsigned int)round(W)*480*4;
        //atomicAdd(val0, P2.x);/// \todo return theta and phi + corrections in Matlab
atomicAdd(val0, theta);
val0=(float*)PX+1+(unsigned int)round(Hi)*4+(unsigned int)round(W)*480*4;
//atomicAdd(val0, P2.y);
atomicAdd(val0, phi);
//val0=(float*)PX+2+(unsigned int)round(Hi)*4+(unsigned int)round(W)*480*4;
//atomicAdd(val0, P2.z);
val0=(float*)PX+3+(unsigned int)round(Hi)*4+(unsigned int)round(W)*480*4;
atomicAdd(val0, value);//+1
        //Calculation of the energy loss caused by reflection at the lens surfaces and by the increasing travel distance
// value=0.01f*(length(P1-P2) + length(P2-P3));
// value*=value;//fast square
float Ka1 = cos(P8)/value;
//KA1[index]=Ka1;
// value=0.01f*length(P3-P4);
// value*=value;
// float Ka2 = Ka1*cos(P9)/value;
//KA1[index]=Ka2;
// value=0.01f*length(P4-P5);
// value*=value;
// float Ka3 = Ka2*cos(P10)/value;
//KA1[index]=Ka3;
// value=cos(P11);//in calculation intensive code calculating same cosine twice isn't wise
// value*=value;
// float Ka4 = Ka3*value;
// value=0.01f*(length(P5-P6) + length(P6-P7));
// value*=value;
// Ka4/=value;
//KA1[index]=Ka4;
// value=1.0f/Ka4;
value=1.0f;
//KA1[index]=value;
val0=IC+(unsigned int)round(Hi)+(unsigned int)round(W)*480;
atomicAdd(val0, value);
//val0=&KA1[indexi];
//value=1.0f;
//atomicAdd(val0, value);
//float* val0=IM+(unsigned int)round(Hi)+(unsigned int)round(W)*480;
//atomicAdd(val0, value);
}
}//extern "C"
|
e1c98e2c3b48dc203e7124e0cbbf603235787052.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__constant__ uint32_t k[] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 };
__constant__ uint32_t h_init[] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 };
#define HashPrimorial 16
#define Zrotr(a, b) ((a << b) | (a >> (32 - b)))
#define Ch(x, y, z) (z ^ (x & (y ^ z)))
#define Ma(x, y, z) ((x & z) | (y & (x | z)))
#define ZR25(n) ((Zrotr((n), 25) ^ Zrotr((n), 14) ^ ((n) >> 3U)))
#define ZR15(n) ((Zrotr((n), 15) ^ Zrotr((n), 13) ^ ((n) >> 10U)))
#define ZR26(n) ((Zrotr((n), 26) ^ Zrotr((n), 21) ^ Zrotr((n), 7)))
#define ZR30(n) ((Zrotr((n), 30) ^ Zrotr((n), 19) ^ Zrotr((n), 10)))
__constant__ uint32_t indexesOne[] = { 1, 2, 3, 5, 6 };
__constant__ uint32_t divisors24one[] = { 3, 5, 7, 13, 17 };
__constant__ uint32_t indexes[] = { 4, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 };
__constant__ uint32_t divisors24[] = { 11, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71 };
__constant__ uint32_t modulos24one[] = {
0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1
};
__constant__ uint32_t modulos24[] = {
0x5, 0x3, 0x4, 0x9, 0x1, 0x5, 0x3, 0x4, 0x9, 0x1, 0x5, // 11
0x7, 0xb, 0x1, 0x7, 0xb, 0x1, 0x7, 0xb, 0x1, 0x7, 0xb, // 19
0x4, 0x10, 0x12, 0x3, 0xc, 0x2, 0x8, 0x9, 0xd, 0x6, 0x1, // 23
0x14, 0x17, 0x19, 0x7, 0x18, 0x10, 0x1, 0x14, 0x17, 0x19, 0x7, // 29
0x10, 0x8, 0x4, 0x2, 0x1, 0x10, 0x8, 0x4, 0x2, 0x1, 0x10, // 31
0xa, 0x1a, 0x1, 0xa, 0x1a, 0x1, 0xa, 0x1a, 0x1, 0xa, 0x1a, // 37
0x10, 0xa, 0x25, 0x12, 0x1, 0x10, 0xa, 0x25, 0x12, 0x1, 0x10, // 41
0x23, 0x15, 0x4, 0xb, 0x29, 0x10, 0x1, 0x23, 0x15, 0x4, 0xb, // 43
0x2, 0x4, 0x8, 0x10, 0x20, 0x11, 0x22, 0x15, 0x2a, 0x25, 0x1b, // 47
0xd, 0xa, 0x18, 0x2f, 0x1c, 0x2e, 0xf, 0x24, 0x2c, 0x2a, 0x10, // 53 *
0x23, 0x2d, 0x29, 0x13, 0x10, 0x1d, 0xc, 0x7, 0x9, 0x14, 0x33, // 59
0x14, 0x22, 0x9, 0x3a, 0x1, 0x14, 0x22, 0x9, 0x3a, 0x1, 0x14, // 61
0xe, 0x3e, 0x40, 0x19, 0xf, 0x9, 0x3b, 0x16, 0x28, 0x18, 0x1, // 67 *
0x3a, 0x1b, 0x4, 0x13, 0x25, 0x10, 0x5, 0x6, 0x40, 0x14, 0x18, // 71
};
__constant__ uint32_t multipliers32one[] = {
0xaaaaaaab, // 3
0x66666667, // 5
0x92492493, // 7
0x4ec4ec4f, // 13
0x78787879, // 17
};
__constant__ uint32_t multipliers32[] = {
0x2e8ba2e9, // 11
0x6bca1af3, // 19
0xb21642c9, // 23
0x8d3dcb09, // 29
0x84210843, // 31
0xdd67c8a7, // 37
0x63e7063f, // 41
0x2fa0be83, // 43
0xae4c415d, // 47
0x4d4873ed, // 53 *
0x22b63cbf, // 59
0x4325c53f, // 61
0x7a44c6b, // 67 *
0xe6c2b449 // 71
};
__constant__ uint32_t offsets32one[] = {
1, // 3
1, // 5
2, // 7
2, // 13
3 // 17
};
__constant__ uint32_t offsets32[] = {
1, // 11
3, // 19
4, // 23
4, // 29
4, // 31
5, // 37
4, // 41
3, // 43
5, // 47
4, // 53 *
3, // 59
4, // 61
1, // 67 *
6 // 71
};
// * using 24-bit arithmetic with primes 53,67 can produce wrong results!
__constant__ unsigned gPrimes[] = {
2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73
};
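// sum24 folds the input into 24-bit limbs and accumulates limb*moddata[i], where
// moddata[i] holds 2^(24*i) mod divisor, so the sum is congruent to the full value
// modulo that divisor. check24 then tests divisibility using the magic-number
// multiply (X is divisible iff X == divisor * floor(X/divisor)).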
__device__ uint32_t sum24(const uint32_t *data, unsigned size, uint32_t *moddata)
{
unsigned size24 = size*32; size24 += size24 % 24 ? 24 - size24%24 : 0;
uint32_t acc = data[0] & 0x00FFFFFF;
#pragma unroll
for (unsigned i = 0, bitPos = 24; bitPos < size24; bitPos += 24, i++) {
uint64_t v64 = *(uint64_t*)(data+bitPos/32) >> (bitPos%32);
acc += __umul24(v64 & 0xFFFFFF, moddata[i]);
}
return acc;
}
__device__ unsigned check24(uint32_t X, uint32_t divisor, uint32_t inversedMultiplier, unsigned offset)
{
return X == divisor*(__umulhi(X, inversedMultiplier) >> offset);
}
__device__ unsigned divisionCheck24(const uint32_t *data,
unsigned size,
uint32_t divisor,
uint32_t *moddata,
uint32_t inversedMultiplier,
unsigned offset)
{
return check24(sum24(data, size, moddata), divisor, inversedMultiplier, offset);
}
__device__ uint32_t sha2_pack(uint32_t val) {
return ((val & 0xFF) << 24) | ((val & 0xFF00) << 8) | ((val & 0xFF0000) >> 8) | ((val & 0xFF000000) >> 24);
}
__device__ void sha256(const uint32_t *msg, uint32_t *s)
{
#define ROUND(num) {\
const uint32_t temp1 = h + ZR26(e) + Ch(e, f, g) + k[num] + w[num];\
const uint32_t temp2 = ZR30(a) + Ma(a, b, c);\
h = g;\
g = f;\
f = e;\
e = d + temp1;\
d = c;\
c = b;\
b = a;\
a = temp1 + temp2;\
}
uint32_t w[64];
#pragma unroll
for(int i = 0; i < 16; ++i)
w[i] = msg[i];
#pragma unroll
for(int i = 16; i < 64; ++i){
const uint32_t s0 = ZR25(w[i-15]);
const uint32_t s1 = ZR15(w[i-2]);
w[i] = w[i-16] + s0 + w[i-7] + s1;
}
uint32_t a = s[0];
uint32_t b = s[1];
uint32_t c = s[2];
uint32_t d = s[3];
uint32_t e = s[4];
uint32_t f = s[5];
uint32_t g = s[6];
uint32_t h = s[7];
ROUND(0)
ROUND(1)
ROUND(2)
ROUND(3)
ROUND(4)
ROUND(5)
ROUND(6)
ROUND(7)
ROUND(8)
ROUND(9)
ROUND(10)
ROUND(11)
ROUND(12)
ROUND(13)
ROUND(14)
ROUND(15)
ROUND(16)
ROUND(17)
ROUND(18)
ROUND(19)
ROUND(20)
ROUND(21)
ROUND(22)
ROUND(23)
ROUND(24)
ROUND(25)
ROUND(26)
ROUND(27)
ROUND(28)
ROUND(29)
ROUND(30)
ROUND(31)
ROUND(32)
ROUND(33)
ROUND(34)
ROUND(35)
ROUND(36)
ROUND(37)
ROUND(38)
ROUND(39)
ROUND(40)
ROUND(41)
ROUND(42)
ROUND(43)
ROUND(44)
ROUND(45)
ROUND(46)
ROUND(47)
ROUND(48)
ROUND(49)
ROUND(50)
ROUND(51)
ROUND(52)
ROUND(53)
ROUND(54)
ROUND(55)
ROUND(56)
ROUND(57)
ROUND(58)
ROUND(59)
ROUND(60)
ROUND(61)
ROUND(62)
ROUND(63)
s[0] += a;
s[1] += b;
s[2] += c;
s[3] += d;
s[4] += e;
s[5] += f;
s[6] += g;
s[7] += h;
#undef ROUND
}
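// Identical to sha256() above except that, for the first few rounds, the new values
// of a and e are taken from the host-precomputed tables (new1Data, new2Data,
// temp2Data) rather than recomputed, presumably because those rounds only depend on
// data that is fixed for the current work unit.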
__device__ void sha256UsePrecalc(const uint32_t *msg,
uint32_t *s,
const uint32_t *WData, int WSize,
const uint32_t *new1Data, int new1Size,
const uint32_t *new2Data, int new2Size,
const uint32_t *temp2Data, int tmp2Size)
{
#define ROUND(num) {\
const uint32_t temp1 = h + ZR26(e) + Ch(e, f, g) + k[num] + w[num];\
const uint32_t temp2 = ZR30(a) + Ma(a, b, c);\
h = g;\
g = f;\
f = e;\
if (num < new2Size)\
e = new2Data[num];\
else\
e = d + temp1;\
d = c;\
c = b;\
b = a;\
if (num < new1Size)\
a = new1Data[num];\
else if (num < tmp2Size)\
a = temp1 + temp2Data[num];\
else\
a = temp1 + temp2;\
}
uint32_t w[64];
#pragma unroll
for(int i = 0; i < 16; ++i)
w[i] = msg[i];
#pragma unroll
for(int i = 16; i < 64; ++i){
const uint32_t s0 = ZR25(w[i-15]);
const uint32_t s1 = ZR15(w[i-2]);
w[i] = w[i-16] + s0 + w[i-7] + s1;
}
uint32_t a = s[0];
uint32_t b = s[1];
uint32_t c = s[2];
uint32_t d = s[3];
uint32_t e = s[4];
uint32_t f = s[5];
uint32_t g = s[6];
uint32_t h = s[7];
ROUND(0)
ROUND(1)
ROUND(2)
ROUND(3)
ROUND(4)
ROUND(5)
ROUND(6)
ROUND(7)
ROUND(8)
ROUND(9)
ROUND(10)
ROUND(11)
ROUND(12)
ROUND(13)
ROUND(14)
ROUND(15)
ROUND(16)
ROUND(17)
ROUND(18)
ROUND(19)
ROUND(20)
ROUND(21)
ROUND(22)
ROUND(23)
ROUND(24)
ROUND(25)
ROUND(26)
ROUND(27)
ROUND(28)
ROUND(29)
ROUND(30)
ROUND(31)
ROUND(32)
ROUND(33)
ROUND(34)
ROUND(35)
ROUND(36)
ROUND(37)
ROUND(38)
ROUND(39)
ROUND(40)
ROUND(41)
ROUND(42)
ROUND(43)
ROUND(44)
ROUND(45)
ROUND(46)
ROUND(47)
ROUND(48)
ROUND(49)
ROUND(50)
ROUND(51)
ROUND(52)
ROUND(53)
ROUND(54)
ROUND(55)
ROUND(56)
ROUND(57)
ROUND(58)
ROUND(59)
ROUND(60)
ROUND(61)
ROUND(62)
ROUND(63)
s[0] += a;
s[1] += b;
s[2] += c;
s[3] += d;
s[4] += e;
s[5] += f;
s[6] += g;
s[7] += h;
#undef ROUND
}
#define select(a, b, c) ((c) ? (b) : (a))
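// Each thread assembles the final header words (merkle, time, nbits, nonce = id),
// double SHA-256 hashes them starting from the precomputed midstate, and, when the
// top bit of the final hash is set, tests the hash for divisibility by the small
// primes above. The hits are packed into a primorial bit field, and a result is
// emitted for each primorial length (13/14/15 primes) whose product fits below the
// compile-time LIMIT13/LIMIT14/LIMIT15 bounds.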
__global__ void bhashmodUsePrecalc(uint32_t nonceOffset,
uint32_t *found,
uint32_t *fcount,
uint32_t *resultPrimorial,
uint32_t *midstate,
uint32_t merkle,
uint32_t time,
uint32_t nbits,
uint32_t W0,
uint32_t W1,
uint32_t new1_0,
uint32_t new1_1,
uint32_t new1_2,
uint32_t new2_0,
uint32_t new2_1,
uint32_t new2_2,
uint32_t temp2_3)
{
const uint32_t id = blockIdx.x * blockDim.x + threadIdx.x + nonceOffset;
uint32_t msg[16];
msg[0] = merkle;
msg[1] = time;
msg[2] = nbits;
msg[3] = sha2_pack(id);
msg[4] = sha2_pack(0x80);
#pragma unroll
for(int i = 5; i < 15; ++i)
msg[i] = 0;
msg[15] = 640;
uint32_t state[9];
#pragma unroll
for(int i = 0; i < 8; ++i)
state[i] = midstate[i];
uint32_t W[2] = {W0, W1};
uint32_t new1[3] = {new1_0, new1_1, new1_2};
uint32_t new2[3] = {new2_0, new2_1, new2_2};
uint32_t temp2[4] = {0, 0, 0, temp2_3};
sha256UsePrecalc(msg, state, W, 2, new1, 3, new2, 3, temp2, 4);
#pragma unroll
for(int i = 0; i < 8; ++i)
msg[i] = state[i];
msg[8] = sha2_pack(0x80);
msg[15] = 256;
#pragma unroll
for(int i = 0; i < 8; ++i)
state[i] = h_init[i];
sha256(msg, state);
for(int i = 0; i < 8; ++i)
state[i] = sha2_pack(state[i]);
if (state[7] & (1u << 31)) {
uint32_t count = !(state[0] & 0x1);
uint32_t primorialBitField = count;
state[8] = 0;
{
uint32_t acc = sum24(state, 8, modulos24one);
#pragma unroll
for (unsigned i = 0; i < 5; i++) {
unsigned isDivisor = check24(acc, divisors24one[i], multipliers32one[i], offsets32one[i]);
primorialBitField |= (isDivisor << indexesOne[i]);
count += isDivisor;
}
}
unsigned lastBit = 0;
#pragma unroll
for (unsigned i = 0; i < HashPrimorial-5; i++) {
unsigned isDivisor =
divisionCheck24(state, 8, divisors24[i], &modulos24[i*11], multipliers32[i], offsets32[i]);
primorialBitField |= (isDivisor << indexes[i]);
lastBit = isDivisor ? i+5 : lastBit;
}
uint32_t prod13l = 1;
for (unsigned i = 0; i < 8; i++)
prod13l = __umul24(prod13l, select(gPrimes[i], 1u, primorialBitField & (1u << i)));
prod13l *= select(gPrimes[8], 1u, primorialBitField & (1u << 8));
uint64_t prod13 = prod13l;
for (unsigned i = 9; i < 14; i++)
prod13 *= select(gPrimes[i], 1u, primorialBitField & (1u << i));
uint64_t prod14 = prod13 * select(gPrimes[14], 1u, primorialBitField & (1u << 14));
uint64_t prod15 = prod14 * select(gPrimes[15], 1u, primorialBitField & (1u << 15));
int p13isValid = ((64-__clzll(prod13)) < LIMIT13);
int p14Unique = !(p13isValid & (prod14 == prod13));
int p14isValid = ((64-__clzll(prod14)) < LIMIT14) & p14Unique;
int p15Unique = !(p13isValid & (prod15 == prod13)) & !(p14isValid & (prod15 == prod14));
int p15isValid = ((64-__clzll(prod15)) < LIMIT15) & p15Unique;
if (p13isValid) {
const uint32_t index = atomicAdd(fcount, 1);
resultPrimorial[index] = (primorialBitField & 0xFFFF) | (13u << 16);
found[index] = id;
}
if (p14isValid) {
const uint32_t index = atomicAdd(fcount, 1);
resultPrimorial[index] = (primorialBitField & 0xFFFF) | (14u << 16);
found[index] = id;
}
if (p15isValid) {
const uint32_t index = atomicAdd(fcount, 1);
resultPrimorial[index] = (primorialBitField & 0xFFFF) | (15u << 16);
found[index] = id;
}
}
}
| e1c98e2c3b48dc203e7124e0cbbf603235787052.cu |
__constant__ uint32_t k[] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 };
__constant__ uint32_t h_init[] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 };
#define HashPrimorial 16
#define Zrotr(a, b) ((a << b) | (a >> (32 - b)))
#define Ch(x, y, z) (z ^ (x & (y ^ z)))
#define Ma(x, y, z) ((x & z) | (y & (x | z)))
#define ZR25(n) ((Zrotr((n), 25) ^ Zrotr((n), 14) ^ ((n) >> 3U)))
#define ZR15(n) ((Zrotr((n), 15) ^ Zrotr((n), 13) ^ ((n) >> 10U)))
#define ZR26(n) ((Zrotr((n), 26) ^ Zrotr((n), 21) ^ Zrotr((n), 7)))
#define ZR30(n) ((Zrotr((n), 30) ^ Zrotr((n), 19) ^ Zrotr((n), 10)))
__constant__ uint32_t indexesOne[] = { 1, 2, 3, 5, 6 };
__constant__ uint32_t divisors24one[] = { 3, 5, 7, 13, 17 };
__constant__ uint32_t indexes[] = { 4, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 };
__constant__ uint32_t divisors24[] = { 11, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71 };
__constant__ uint32_t modulos24one[] = {
0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1
};
__constant__ uint32_t modulos24[] = {
0x5, 0x3, 0x4, 0x9, 0x1, 0x5, 0x3, 0x4, 0x9, 0x1, 0x5, // 11
0x7, 0xb, 0x1, 0x7, 0xb, 0x1, 0x7, 0xb, 0x1, 0x7, 0xb, // 19
0x4, 0x10, 0x12, 0x3, 0xc, 0x2, 0x8, 0x9, 0xd, 0x6, 0x1, // 23
0x14, 0x17, 0x19, 0x7, 0x18, 0x10, 0x1, 0x14, 0x17, 0x19, 0x7, // 29
0x10, 0x8, 0x4, 0x2, 0x1, 0x10, 0x8, 0x4, 0x2, 0x1, 0x10, // 31
0xa, 0x1a, 0x1, 0xa, 0x1a, 0x1, 0xa, 0x1a, 0x1, 0xa, 0x1a, // 37
0x10, 0xa, 0x25, 0x12, 0x1, 0x10, 0xa, 0x25, 0x12, 0x1, 0x10, // 41
0x23, 0x15, 0x4, 0xb, 0x29, 0x10, 0x1, 0x23, 0x15, 0x4, 0xb, // 43
0x2, 0x4, 0x8, 0x10, 0x20, 0x11, 0x22, 0x15, 0x2a, 0x25, 0x1b, // 47
0xd, 0xa, 0x18, 0x2f, 0x1c, 0x2e, 0xf, 0x24, 0x2c, 0x2a, 0x10, // 53 *
0x23, 0x2d, 0x29, 0x13, 0x10, 0x1d, 0xc, 0x7, 0x9, 0x14, 0x33, // 59
0x14, 0x22, 0x9, 0x3a, 0x1, 0x14, 0x22, 0x9, 0x3a, 0x1, 0x14, // 61
0xe, 0x3e, 0x40, 0x19, 0xf, 0x9, 0x3b, 0x16, 0x28, 0x18, 0x1, // 67 *
0x3a, 0x1b, 0x4, 0x13, 0x25, 0x10, 0x5, 0x6, 0x40, 0x14, 0x18, // 71
};
__constant__ uint32_t multipliers32one[] = {
0xaaaaaaab, // 3
0x66666667, // 5
0x92492493, // 7
0x4ec4ec4f, // 13
0x78787879, // 17
};
__constant__ uint32_t multipliers32[] = {
0x2e8ba2e9, // 11
0x6bca1af3, // 19
0xb21642c9, // 23
0x8d3dcb09, // 29
0x84210843, // 31
0xdd67c8a7, // 37
0x63e7063f, // 41
0x2fa0be83, // 43
0xae4c415d, // 47
0x4d4873ed, // 53 *
0x22b63cbf, // 59
0x4325c53f, // 61
0x7a44c6b, // 67 *
0xe6c2b449 // 71
};
__constant__ uint32_t offsets32one[] = {
1, // 3
1, // 5
2, // 7
2, // 13
3 // 17
};
__constant__ uint32_t offsets32[] = {
1, // 11
3, // 19
4, // 23
4, // 29
4, // 31
5, // 37
4, // 41
3, // 43
5, // 47
4, // 53 *
3, // 59
4, // 61
1, // 67 *
6 // 71
};
// * using 24-bit arithmetic with primes 53,67 can produce wrong results!
__constant__ unsigned gPrimes[] = {
2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73
};
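// sum24 folds the input into 24-bit limbs and accumulates limb*moddata[i], where
// moddata[i] holds 2^(24*i) mod divisor, so the sum is congruent to the full value
// modulo that divisor. check24 then tests divisibility using the magic-number
// multiply (X is divisible iff X == divisor * floor(X/divisor)).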
__device__ uint32_t sum24(const uint32_t *data, unsigned size, uint32_t *moddata)
{
unsigned size24 = size*32; size24 += size24 % 24 ? 24 - size24%24 : 0;
uint32_t acc = data[0] & 0x00FFFFFF;
#pragma unroll
for (unsigned i = 0, bitPos = 24; bitPos < size24; bitPos += 24, i++) {
uint64_t v64 = *(uint64_t*)(data+bitPos/32) >> (bitPos%32);
acc += __umul24(v64 & 0xFFFFFF, moddata[i]);
}
return acc;
}
__device__ unsigned check24(uint32_t X, uint32_t divisor, uint32_t inversedMultiplier, unsigned offset)
{
return X == divisor*(__umulhi(X, inversedMultiplier) >> offset);
}
__device__ unsigned divisionCheck24(const uint32_t *data,
unsigned size,
uint32_t divisor,
uint32_t *moddata,
uint32_t inversedMultiplier,
unsigned offset)
{
return check24(sum24(data, size, moddata), divisor, inversedMultiplier, offset);
}
__device__ uint32_t sha2_pack(uint32_t val) {
return ((val & 0xFF) << 24) | ((val & 0xFF00) << 8) | ((val & 0xFF0000) >> 8) | ((val & 0xFF000000) >> 24);
}
__device__ void sha256(const uint32_t *msg, uint32_t *s)
{
#define ROUND(num) {\
const uint32_t temp1 = h + ZR26(e) + Ch(e, f, g) + k[num] + w[num];\
const uint32_t temp2 = ZR30(a) + Ma(a, b, c);\
h = g;\
g = f;\
f = e;\
e = d + temp1;\
d = c;\
c = b;\
b = a;\
a = temp1 + temp2;\
}
uint32_t w[64];
#pragma unroll
for(int i = 0; i < 16; ++i)
w[i] = msg[i];
#pragma unroll
for(int i = 16; i < 64; ++i){
const uint32_t s0 = ZR25(w[i-15]);
const uint32_t s1 = ZR15(w[i-2]);
w[i] = w[i-16] + s0 + w[i-7] + s1;
}
uint32_t a = s[0];
uint32_t b = s[1];
uint32_t c = s[2];
uint32_t d = s[3];
uint32_t e = s[4];
uint32_t f = s[5];
uint32_t g = s[6];
uint32_t h = s[7];
ROUND(0)
ROUND(1)
ROUND(2)
ROUND(3)
ROUND(4)
ROUND(5)
ROUND(6)
ROUND(7)
ROUND(8)
ROUND(9)
ROUND(10)
ROUND(11)
ROUND(12)
ROUND(13)
ROUND(14)
ROUND(15)
ROUND(16)
ROUND(17)
ROUND(18)
ROUND(19)
ROUND(20)
ROUND(21)
ROUND(22)
ROUND(23)
ROUND(24)
ROUND(25)
ROUND(26)
ROUND(27)
ROUND(28)
ROUND(29)
ROUND(30)
ROUND(31)
ROUND(32)
ROUND(33)
ROUND(34)
ROUND(35)
ROUND(36)
ROUND(37)
ROUND(38)
ROUND(39)
ROUND(40)
ROUND(41)
ROUND(42)
ROUND(43)
ROUND(44)
ROUND(45)
ROUND(46)
ROUND(47)
ROUND(48)
ROUND(49)
ROUND(50)
ROUND(51)
ROUND(52)
ROUND(53)
ROUND(54)
ROUND(55)
ROUND(56)
ROUND(57)
ROUND(58)
ROUND(59)
ROUND(60)
ROUND(61)
ROUND(62)
ROUND(63)
s[0] += a;
s[1] += b;
s[2] += c;
s[3] += d;
s[4] += e;
s[5] += f;
s[6] += g;
s[7] += h;
#undef ROUND
}
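// Identical to sha256() above except that, for the first few rounds, the new values
// of a and e are taken from the host-precomputed tables (new1Data, new2Data,
// temp2Data) rather than recomputed, presumably because those rounds only depend on
// data that is fixed for the current work unit.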
__device__ void sha256UsePrecalc(const uint32_t *msg,
uint32_t *s,
const uint32_t *WData, int WSize,
const uint32_t *new1Data, int new1Size,
const uint32_t *new2Data, int new2Size,
const uint32_t *temp2Data, int tmp2Size)
{
#define ROUND(num) {\
const uint32_t temp1 = h + ZR26(e) + Ch(e, f, g) + k[num] + w[num];\
const uint32_t temp2 = ZR30(a) + Ma(a, b, c);\
h = g;\
g = f;\
f = e;\
if (num < new2Size)\
e = new2Data[num];\
else\
e = d + temp1;\
d = c;\
c = b;\
b = a;\
if (num < new1Size)\
a = new1Data[num];\
else if (num < tmp2Size)\
a = temp1 + temp2Data[num];\
else\
a = temp1 + temp2;\
}
uint32_t w[64];
#pragma unroll
for(int i = 0; i < 16; ++i)
w[i] = msg[i];
#pragma unroll
for(int i = 16; i < 64; ++i){
const uint32_t s0 = ZR25(w[i-15]);
const uint32_t s1 = ZR15(w[i-2]);
w[i] = w[i-16] + s0 + w[i-7] + s1;
}
uint32_t a = s[0];
uint32_t b = s[1];
uint32_t c = s[2];
uint32_t d = s[3];
uint32_t e = s[4];
uint32_t f = s[5];
uint32_t g = s[6];
uint32_t h = s[7];
ROUND(0)
ROUND(1)
ROUND(2)
ROUND(3)
ROUND(4)
ROUND(5)
ROUND(6)
ROUND(7)
ROUND(8)
ROUND(9)
ROUND(10)
ROUND(11)
ROUND(12)
ROUND(13)
ROUND(14)
ROUND(15)
ROUND(16)
ROUND(17)
ROUND(18)
ROUND(19)
ROUND(20)
ROUND(21)
ROUND(22)
ROUND(23)
ROUND(24)
ROUND(25)
ROUND(26)
ROUND(27)
ROUND(28)
ROUND(29)
ROUND(30)
ROUND(31)
ROUND(32)
ROUND(33)
ROUND(34)
ROUND(35)
ROUND(36)
ROUND(37)
ROUND(38)
ROUND(39)
ROUND(40)
ROUND(41)
ROUND(42)
ROUND(43)
ROUND(44)
ROUND(45)
ROUND(46)
ROUND(47)
ROUND(48)
ROUND(49)
ROUND(50)
ROUND(51)
ROUND(52)
ROUND(53)
ROUND(54)
ROUND(55)
ROUND(56)
ROUND(57)
ROUND(58)
ROUND(59)
ROUND(60)
ROUND(61)
ROUND(62)
ROUND(63)
s[0] += a;
s[1] += b;
s[2] += c;
s[3] += d;
s[4] += e;
s[5] += f;
s[6] += g;
s[7] += h;
#undef ROUND
}
#define select(a, b, c) ((c) ? (b) : (a))
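// Each thread assembles the final header words (merkle, time, nbits, nonce = id),
// double SHA-256 hashes them starting from the precomputed midstate, and, when the
// top bit of the final hash is set, tests the hash for divisibility by the small
// primes above. The hits are packed into a primorial bit field, and a result is
// emitted for each primorial length (13/14/15 primes) whose product fits below the
// compile-time LIMIT13/LIMIT14/LIMIT15 bounds.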
__global__ void bhashmodUsePrecalc(uint32_t nonceOffset,
uint32_t *found,
uint32_t *fcount,
uint32_t *resultPrimorial,
uint32_t *midstate,
uint32_t merkle,
uint32_t time,
uint32_t nbits,
uint32_t W0,
uint32_t W1,
uint32_t new1_0,
uint32_t new1_1,
uint32_t new1_2,
uint32_t new2_0,
uint32_t new2_1,
uint32_t new2_2,
uint32_t temp2_3)
{
const uint32_t id = blockIdx.x * blockDim.x + threadIdx.x + nonceOffset;
uint32_t msg[16];
msg[0] = merkle;
msg[1] = time;
msg[2] = nbits;
msg[3] = sha2_pack(id);
msg[4] = sha2_pack(0x80);
#pragma unroll
for(int i = 5; i < 15; ++i)
msg[i] = 0;
msg[15] = 640;
uint32_t state[9];
#pragma unroll
for(int i = 0; i < 8; ++i)
state[i] = midstate[i];
uint32_t W[2] = {W0, W1};
uint32_t new1[3] = {new1_0, new1_1, new1_2};
uint32_t new2[3] = {new2_0, new2_1, new2_2};
uint32_t temp2[4] = {0, 0, 0, temp2_3};
sha256UsePrecalc(msg, state, W, 2, new1, 3, new2, 3, temp2, 4);
#pragma unroll
for(int i = 0; i < 8; ++i)
msg[i] = state[i];
msg[8] = sha2_pack(0x80);
msg[15] = 256;
#pragma unroll
for(int i = 0; i < 8; ++i)
state[i] = h_init[i];
sha256(msg, state);
for(int i = 0; i < 8; ++i)
state[i] = sha2_pack(state[i]);
if (state[7] & (1u << 31)) {
uint32_t count = !(state[0] & 0x1);
uint32_t primorialBitField = count;
state[8] = 0;
{
uint32_t acc = sum24(state, 8, modulos24one);
#pragma unroll
for (unsigned i = 0; i < 5; i++) {
unsigned isDivisor = check24(acc, divisors24one[i], multipliers32one[i], offsets32one[i]);
primorialBitField |= (isDivisor << indexesOne[i]);
count += isDivisor;
}
}
unsigned lastBit = 0;
#pragma unroll
for (unsigned i = 0; i < HashPrimorial-5; i++) {
unsigned isDivisor =
divisionCheck24(state, 8, divisors24[i], &modulos24[i*11], multipliers32[i], offsets32[i]);
primorialBitField |= (isDivisor << indexes[i]);
lastBit = isDivisor ? i+5 : lastBit;
}
uint32_t prod13l = 1;
for (unsigned i = 0; i < 8; i++)
prod13l = __umul24(prod13l, select(gPrimes[i], 1u, primorialBitField & (1u << i)));
prod13l *= select(gPrimes[8], 1u, primorialBitField & (1u << 8));
uint64_t prod13 = prod13l;
for (unsigned i = 9; i < 14; i++)
prod13 *= select(gPrimes[i], 1u, primorialBitField & (1u << i));
uint64_t prod14 = prod13 * select(gPrimes[14], 1u, primorialBitField & (1u << 14));
uint64_t prod15 = prod14 * select(gPrimes[15], 1u, primorialBitField & (1u << 15));
int p13isValid = ((64-__clzll(prod13)) < LIMIT13);
int p14Unique = !(p13isValid & (prod14 == prod13));
int p14isValid = ((64-__clzll(prod14)) < LIMIT14) & p14Unique;
int p15Unique = !(p13isValid & (prod15 == prod13)) & !(p14isValid & (prod15 == prod14));
int p15isValid = ((64-__clzll(prod15)) < LIMIT15) & p15Unique;
if (p13isValid) {
const uint32_t index = atomicAdd(fcount, 1);
resultPrimorial[index] = (primorialBitField & 0xFFFF) | (13u << 16);
found[index] = id;
}
if (p14isValid) {
const uint32_t index = atomicAdd(fcount, 1);
resultPrimorial[index] = (primorialBitField & 0xFFFF) | (14u << 16);
found[index] = id;
}
if (p15isValid) {
const uint32_t index = atomicAdd(fcount, 1);
resultPrimorial[index] = (primorialBitField & 0xFFFF) | (15u << 16);
found[index] = id;
}
}
}
|
95ba48440e3f864d8e2fcbb18fb1bf539b82225a.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "paddle/fluid/operators/reduce_ops/cub_reduce.h"
#include "paddle/fluid/operators/trace_op.h"
namespace paddle {
namespace operators {
struct IdentityFunctor {
HOSTDEVICE explicit inline IdentityFunctor() {}
template <typename U>
HOSTDEVICE inline U operator()(const U& x) const {
return x;
}
};
template <typename DeviceContext, typename T>
class TraceCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* input = context.Input<framework::Tensor>("Input");
auto* out = context.Output<framework::Tensor>("Out");
const int64_t offset = context.Attr<int>("offset");
const int64_t dim1 = context.Attr<int>("axis1");
const int64_t dim2 = context.Attr<int>("axis2");
T* out_data = out->mutable_data<T>(context.GetPlace());
const framework::Tensor diag =
Diagonal<DeviceContext, T>(context, input, offset, dim1, dim2);
if (diag.numel() > 0) {
auto stream = context.cuda_device_context().stream();
std::vector<int> reduce_dims;
reduce_dims.push_back(out->dims().size());
TensorReduce<T, T, hipcub::Sum, IdentityFunctor>(
diag, out, reduce_dims, static_cast<T>(0), hipcub::Sum(),
IdentityFunctor(), stream);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace platform = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
trace, ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, int>,
ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>,
ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext,
platform::float16>,
ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, double>,
ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext,
paddle::platform::complex<float>>,
ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext,
paddle::platform::complex<double>>);
REGISTER_OP_CUDA_KERNEL(
trace_grad, ops::TraceGradKernel<paddle::platform::CUDADeviceContext, int>,
ops::TraceGradKernel<paddle::platform::CUDADeviceContext, int64_t>,
ops::TraceGradKernel<paddle::platform::CUDADeviceContext,
platform::float16>,
ops::TraceGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::TraceGradKernel<paddle::platform::CUDADeviceContext, double>,
ops::TraceGradKernel<paddle::platform::CUDADeviceContext,
paddle::platform::complex<float>>,
ops::TraceGradKernel<paddle::platform::CUDADeviceContext,
paddle::platform::complex<double>>);
| 95ba48440e3f864d8e2fcbb18fb1bf539b82225a.cu | // Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "paddle/fluid/operators/reduce_ops/cub_reduce.h"
#include "paddle/fluid/operators/trace_op.h"
namespace paddle {
namespace operators {
struct IdentityFunctor {
HOSTDEVICE explicit inline IdentityFunctor() {}
template <typename U>
HOSTDEVICE inline U operator()(const U& x) const {
return x;
}
};
template <typename DeviceContext, typename T>
class TraceCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* input = context.Input<framework::Tensor>("Input");
auto* out = context.Output<framework::Tensor>("Out");
const int64_t offset = context.Attr<int>("offset");
const int64_t dim1 = context.Attr<int>("axis1");
const int64_t dim2 = context.Attr<int>("axis2");
T* out_data = out->mutable_data<T>(context.GetPlace());
const framework::Tensor diag =
Diagonal<DeviceContext, T>(context, input, offset, dim1, dim2);
if (diag.numel() > 0) {
auto stream = context.cuda_device_context().stream();
std::vector<int> reduce_dims;
reduce_dims.push_back(out->dims().size());
TensorReduce<T, T, cub::Sum, IdentityFunctor>(
diag, out, reduce_dims, static_cast<T>(0), cub::Sum(),
IdentityFunctor(), stream);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace platform = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
trace, ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, int>,
ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>,
ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext,
platform::float16>,
ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, double>,
ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext,
paddle::platform::complex<float>>,
ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext,
paddle::platform::complex<double>>);
REGISTER_OP_CUDA_KERNEL(
trace_grad, ops::TraceGradKernel<paddle::platform::CUDADeviceContext, int>,
ops::TraceGradKernel<paddle::platform::CUDADeviceContext, int64_t>,
ops::TraceGradKernel<paddle::platform::CUDADeviceContext,
platform::float16>,
ops::TraceGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::TraceGradKernel<paddle::platform::CUDADeviceContext, double>,
ops::TraceGradKernel<paddle::platform::CUDADeviceContext,
paddle::platform::complex<float>>,
ops::TraceGradKernel<paddle::platform::CUDADeviceContext,
paddle::platform::complex<double>>);
|
926d9cb7ba430188674d06cb3395412c2c358966.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Zero-Copy example, using vector addition as showcase
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#define SIZE (1048576)
// CUDA kernel, using zerocopy
__global__ void
vectorAdd(float *A, float *B, float *C, int numElements)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < SIZE) {
C[id] = A[id] + B[id];
}
}
/**
* Host main routine
*/
int
main(void)
{
float *h_A, *d_A, *h_B, *d_B, *h_C, *d_C;
// allocate host memory
h_A = (float*) malloc(SIZE * sizeof(float));
h_B = (float*) malloc(SIZE * sizeof(float));
h_C = (float*) malloc(SIZE * sizeof(float));
// allocate memory on device
hipMalloc(&d_A, SIZE * sizeof(float));
hipMalloc(&d_B, SIZE * sizeof(float));
hipMalloc(&d_C, SIZE * sizeof(float));
for (int i=0; i < SIZE; i++) {
h_A[i] = rand() / (float) RAND_MAX;
h_B[i] = rand() / (float) RAND_MAX;
}
hipMemcpy(d_A, h_A, SIZE * sizeof(float), hipMemcpyDefault); // we're using UVA...
hipMemcpy(d_B, h_B, SIZE * sizeof(float), hipMemcpyDefault);
printf("> run vectorAdd using copied device memory...\n");
dim3 block(256);
dim3 grid((unsigned int) ceil(SIZE / block.x));
// kernel call
hipLaunchKernelGGL(( vectorAdd), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, SIZE);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("err: %s\n", hipGetErrorString(err));
}
//vectorAdd<<<1024,1024>>>(d_A, d_B, d_C, SIZE);
hipMemcpy(h_C, d_C, SIZE * sizeof(float), hipMemcpyDefault);
hipDeviceSynchronize();
printf("> kernel call synchronized\n");
printf("%f vs %f\n", h_A[123]+h_B[123], h_C[123]);
printf("> releasing host memory...\n");
free(h_A);
free(h_B);
free(h_C);
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
printf("> done\n");
return 0;
}
| 926d9cb7ba430188674d06cb3395412c2c358966.cu | /*
* Zero-Copy example, using vector addition as showcase
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#define SIZE (1048576)
// CUDA kernel, using zerocopy
__global__ void
vectorAdd(float *A, float *B, float *C, int numElements)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < SIZE) {
C[id] = A[id] + B[id];
}
}
/**
* Host main routine
*/
int
main(void)
{
float *h_A, *d_A, *h_B, *d_B, *h_C, *d_C;
// allocate host memory
h_A = (float*) malloc(SIZE * sizeof(float));
h_B = (float*) malloc(SIZE * sizeof(float));
h_C = (float*) malloc(SIZE * sizeof(float));
// allocate memory on device
cudaMalloc(&d_A, SIZE * sizeof(float));
cudaMalloc(&d_B, SIZE * sizeof(float));
cudaMalloc(&d_C, SIZE * sizeof(float));
for (int i=0; i < SIZE; i++) {
h_A[i] = rand() / (float) RAND_MAX;
h_B[i] = rand() / (float) RAND_MAX;
}
cudaMemcpy(d_A, h_A, SIZE * sizeof(float), cudaMemcpyDefault); // we're using UVA...
cudaMemcpy(d_B, h_B, SIZE * sizeof(float), cudaMemcpyDefault);
printf("> run vectorAdd using copied device memory...\n");
dim3 block(256);
dim3 grid((unsigned int) ceil(SIZE / block.x));
// kernel call
vectorAdd<<<grid, block>>>(d_A, d_B, d_C, SIZE);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("err: %s\n", cudaGetErrorString(err));
}
//vectorAdd<<<1024,1024>>>(d_A, d_B, d_C, SIZE);
cudaMemcpy(h_C, d_C, SIZE * sizeof(float), cudaMemcpyDefault);
cudaDeviceSynchronize();
printf("> kernel call synchronized\n");
printf("%f vs %f\n", h_A[123]+h_B[123], h_C[123]);
printf("> releasing host memory...\n");
free(h_A);
free(h_B);
free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
printf("> done\n");
return 0;
}
|
44d1c8f6207c9394fb0ec18a0f20c1cfe9ee5acf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
#include "ATen/ATen.h"
#include "test_seed.h"
#include "ATen/core/TensorAccessor.h"
#include "ATen/hip/HIPContext.h"
#include <assert.h>
using namespace at;
__global__ void test_tensor_packed_accessor_kernel(PackedTensorAccessor<float,1,RestrictPtrTraits> resa,
PackedTensorAccessor<float,2,RestrictPtrTraits> t1a,
PackedTensorAccessor<float,1,RestrictPtrTraits> t2a){
for (int64_t i = 0; i < resa.size(0); i++) {
float val = 0.0f;
for (int64_t j = 0; j < t1a.size(1); j++) {
val += t1a[i][j] * t2a[j];
}
resa[i] = val;
}
}
TEST_CASE( "test PackedTensorAccessor and Tensor.packed_accessor", "[cuda]" ) {
manual_seed(123, at::kCPU);
manual_seed(123, at::kCUDA);
Tensor t1 = rand({4, 4}, CUDA(kFloat));
Tensor t2 = rand({4}, CUDA(kFloat));
Tensor res = empty({4}, CUDA(kFloat));
auto t1a = t1.packed_accessor<float, 2, RestrictPtrTraits>();
auto t2a = t2.packed_accessor<float, 1, RestrictPtrTraits>();
auto resa = res.packed_accessor<float, 1, RestrictPtrTraits>();
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( test_tensor_packed_accessor_kernel), dim3(1), dim3(1), 0, stream, resa, t1a, t2a);
hipError_t err = hipDeviceSynchronize();
REQUIRE(err == hipSuccess);
auto expected = mv(t1, t2);
REQUIRE(res.allclose(expected));
}
| 44d1c8f6207c9394fb0ec18a0f20c1cfe9ee5acf.cu | #define CATCH_CONFIG_MAIN
#include "catch.hpp"
#include "ATen/ATen.h"
#include "test_seed.h"
#include "ATen/core/TensorAccessor.h"
#include "ATen/cuda/CUDAContext.h"
#include <assert.h>
using namespace at;
__global__ void test_tensor_packed_accessor_kernel(PackedTensorAccessor<float,1,RestrictPtrTraits> resa,
PackedTensorAccessor<float,2,RestrictPtrTraits> t1a,
PackedTensorAccessor<float,1,RestrictPtrTraits> t2a){
for (int64_t i = 0; i < resa.size(0); i++) {
float val = 0.0f;
for (int64_t j = 0; j < t1a.size(1); j++) {
val += t1a[i][j] * t2a[j];
}
resa[i] = val;
}
}
TEST_CASE( "test PackedTensorAccessor and Tensor.packed_accessor", "[cuda]" ) {
manual_seed(123, at::kCPU);
manual_seed(123, at::kCUDA);
Tensor t1 = rand({4, 4}, CUDA(kFloat));
Tensor t2 = rand({4}, CUDA(kFloat));
Tensor res = empty({4}, CUDA(kFloat));
auto t1a = t1.packed_accessor<float, 2, RestrictPtrTraits>();
auto t2a = t2.packed_accessor<float, 1, RestrictPtrTraits>();
auto resa = res.packed_accessor<float, 1, RestrictPtrTraits>();
auto stream = at::cuda::getCurrentCUDAStream();
test_tensor_packed_accessor_kernel<<<1, 1, 0, stream>>>(resa, t1a, t2a);
cudaError_t err = cudaDeviceSynchronize();
REQUIRE(err == cudaSuccess);
auto expected = mv(t1, t2);
REQUIRE(res.allclose(expected));
}
|
444c6e7fcc5008b9ab436e630fff10ea11d1a4a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <signal.h>
#include "cuda_utils.h"
void cuda_sig_handler(int sig)
{
bool is_handled = false;
switch (sig)
{
case SIGINT:
std::cerr << "\nCaught SIGINT. ";
is_handled = true;
break;
case SIGTERM:
std::cerr << "\nCaught SIGTERM. ";
is_handled = true;
break;
default:
std::cerr << "\nUnhandled signal. Doing nothing.";
is_handled = false;
return;
}
if (is_handled)
{
// Reset GPU
std::cerr << "Resetting device(s)." << std::endl;
int devCount;
hipGetDeviceCount(&devCount);
for (int i = 0; i < devCount; ++i)
{
hipSetDevice(i);
hipDeviceReset();
}
exit(sig);
}
}
__global__ void dummy_kernel(int* A)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
A[tid] = 42;
}
int main()
{
// Catch signals
if (signal(SIGINT, cuda_sig_handler) == SIG_ERR)
std::cerr << "Cannot catch SIGINT" << std::endl;
if (signal(SIGTERM, cuda_sig_handler) == SIG_ERR)
std::cerr << "Cannot catch SIGTERM" << std::endl;
// Vector size: 2^24 elements
const unsigned int N = 1 << 24;
// Device vector
int *d_A;
// Host vector
int *h_A;
// Allocate device memory
CUDA_SAFE_CALL(hipMalloc(&d_A, N * sizeof(int)));
// Allocate host memory
h_A = new int[N];
// Initialize host data
memset(h_A, 0, N * sizeof(int));
// Copy data to the device
CUDA_SAFE_CALL(hipMemcpy(d_A, h_A, N * sizeof(int), hipMemcpyHostToDevice));
dim3 grid_size, block_size;
block_size.x = 512;
grid_size.x = N/block_size.x;
// Long loop that gives time for the user to hit ctrl+C
for (unsigned int i = 0; i < 1e9; ++i)
{
hipLaunchKernelGGL(( dummy_kernel), dim3(grid_size), dim3(block_size), 0, 0, d_A);
}
// Copy result back to host
CUDA_SAFE_CALL(hipMemcpy(h_A, d_A, N * sizeof(int), hipMemcpyDeviceToHost));
// Free device memory
CUDA_SAFE_CALL(hipFree(d_A));
// Free host memory
delete [] h_A;
}
| 444c6e7fcc5008b9ab436e630fff10ea11d1a4a2.cu | #include <iostream>
#include <signal.h>
#include "cuda_utils.h"
void cuda_sig_handler(int sig)
{
bool is_handled = false;
switch (sig)
{
case SIGINT:
std::cerr << "\nCaught SIGINT. ";
is_handled = true;
break;
case SIGTERM:
std::cerr << "\nCaught SIGTERM. ";
is_handled = true;
break;
default:
std::cerr << "\nUnhandled signal. Doing nothing.";
is_handled = false;
return;
}
if (is_handled)
{
// Reset GPU
std::cerr << "Resetting device(s)." << std::endl;
int devCount;
cudaGetDeviceCount(&devCount);
for (int i = 0; i < devCount; ++i)
{
cudaSetDevice(i);
cudaDeviceReset();
}
exit(sig);
}
}
__global__ void dummy_kernel(int* A)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
A[tid] = 42;
}
int main()
{
// Catch signals
if (signal(SIGINT, cuda_sig_handler) == SIG_ERR)
std::cerr << "Cannot catch SIGINT" << std::endl;
if (signal(SIGTERM, cuda_sig_handler) == SIG_ERR)
std::cerr << "Cannot catch SIGTERM" << std::endl;
// Vector size: 2^24 elements
const unsigned int N = 1 << 24;
// Device vector
int *d_A;
// Host vector
int *h_A;
// Allocate device memory
CUDA_SAFE_CALL(cudaMalloc(&d_A, N * sizeof(int)));
// Allocate host memory
h_A = new int[N];
// Initialize host data
memset(h_A, 0, N * sizeof(int));
// Copy data to the device
CUDA_SAFE_CALL(cudaMemcpy(d_A, h_A, N * sizeof(int), cudaMemcpyHostToDevice));
dim3 grid_size, block_size;
block_size.x = 512;
grid_size.x = N/block_size.x;
// Long loop that gives time for the user to hit ctrl+C
for (unsigned int i = 0; i < 1e9; ++i)
{
dummy_kernel<<<grid_size, block_size>>>(d_A);
}
// Copy result back to host
CUDA_SAFE_CALL(cudaMemcpy(h_A, d_A, N * sizeof(int), cudaMemcpyDeviceToHost));
// Free device memory
CUDA_SAFE_CALL(cudaFree(d_A));
// Free host memory
delete [] h_A;
}
|
ac4569ad98c0a89e96b72904568bfefd92375705.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <lwpr/lwpr.h>
#include <math.h>
#include <lwpr/lwpr_xml.h>
#define CONTROL_DIM 2
#define STATE_DIM 5
#define DERIV_STATE_DIM 3
//N is the number of states in an LWPR model, in this case 5
#define N 5
#define K 1000
#define M 16
#define T 60
#define HZ 20
#define MAX_VAR 10.0
#define BLOCKSIZE 1024
static void HandleError( hipError_t err,
const char *file,
int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//Define a data structure which contains the elements
//of an LWPR receptive field needed to make a prediction.
typedef struct {
float c[N];
float D[N*N];
int trustworthy;
float beta0;
float mean_x[N];
int nReg;
float n_data[N];
float U[N*N];
float P[N*N];
float beta[N];
float SSs2[N];
float sum_e_cv2[N];
float sum_W[N];
float SSp;
} RF_Predict;
//Transfers data from a full receptive field to a (smaller) rfPredict struct
void rfTransfer(LWPR_ReceptiveField *rf_orig, RF_Predict *rf_pred, int nInS) {
int i,j;
int R = rf_orig->nReg;
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++){
rf_pred->D[i*N + j] = float(rf_orig->D[nInS*i + j]);
}
}
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
if (i < R) {
rf_pred->U[i*N + j] = float(rf_orig->U[i*nInS + j]);
rf_pred->P[i*N + j] = float(rf_orig->P[i*nInS + j]);
}
else {
                //Pad the unused part of the array with zeros so later reads never see uninitialized values
rf_pred->U[i*N + j] = 0;
rf_pred->P[i*N + j] = 0;
}
}
}
for (i = 0; i < N; i++) {
rf_pred->c[i] = float(rf_orig->c[i]);
rf_pred->mean_x[i] = float(rf_orig->mean_x[i]);
}
for (i = 0; i < R; i++) {
rf_pred->n_data[i] = float(rf_orig->n_data[i]);
rf_pred->beta[i] = float(rf_orig->beta[i]);
rf_pred->SSs2[i] = float(rf_orig->SSs2[i]);
rf_pred->sum_e_cv2[i] = float(rf_orig->sum_e_cv2[i]);
rf_pred->sum_W[i] = float(rf_orig->sum_w[i]);
}
for (i = R; i < N; i++) {
rf_pred->n_data[i] = 0;
rf_pred->beta[i] = 0;
rf_pred->SSs2[i] = 0;
rf_pred->sum_e_cv2[i] = 0;
rf_pred->sum_W[i] = 0;
}
rf_pred->trustworthy = rf_orig->trustworthy;
rf_pred->beta0 = float(rf_orig->beta0);
rf_pred->nReg = rf_orig->nReg;
rf_pred->SSp = float(rf_orig->SSp);
}
//==============================================================================
//----------------------------CUDA FUNCTIONS------------------------------------
//==============================================================================
__constant__ float U_d[T*CONTROL_DIM];
__constant__ float dm_d[T*M*DERIV_STATE_DIM];
__constant__ float norm_in_d[N];
__device__ void print_vec(float* A, float* B, int n) {
printf("\n\n++++++++++++++++++++++++++++++++++++++++++");
printf("\n Printing A \n");
for (int i = 0; i < n; i++) {
printf(" %f ", A[i]);
}
printf("- \n \n -");
printf("\n Printing B \n");
for (int i = 0; i < n; i++) {
printf(" %f ", B[i]);
}
printf("=================================\n");
}
__device__ void rf_to_shared_mem(RF_Predict *rf_s, RF_Predict *rf_g, int idx) {
//Smaller indices load arrays
if (idx < N*N) {
rf_s->D[idx] = rf_g->D[idx];
}
else if (idx >= N*N && idx < 2*N*N) {
rf_s->U[idx-N*N] = rf_g->U[idx-N*N];
}
else if (idx >= 2*N*N && idx < 3*N*N) {
rf_s->P[idx-2*N*N] = rf_g->P[idx-2*N*N];
}
//Intermediate indices load vectors
else if (idx >= 3*N*N && idx < 3*N*N + N) {
rf_s->c[idx-3*N*N] = rf_g->c[idx-3*N*N];
}
else if (idx >= 3*N*N + N && idx < 3*N*N + 2*N) {
rf_s->mean_x[idx-(3*N*N + N)] = rf_g->mean_x[idx-(3*N*N + N)];
}
else if (idx >= 3*N*N + 2*N && idx < 3*N*N + 3*N) {
rf_s->n_data[idx-(3*N*N + 2*N)] = rf_g->n_data[idx-(3*N*N + 2*N)];
}
else if (idx >= 3*N*N + 3*N && idx < 3*N*N + 4*N) {
rf_s->beta[idx-(3*N*N + 3*N)] = rf_g->beta[idx-(3*N*N + 3*N)];
}
else if (idx >= 3*N*N + 4*N && idx < 3*N*N + 5*N) {
rf_s->SSs2[idx-(3*N*N + 4*N)] = rf_g->SSs2[idx-(3*N*N + 4*N)];
}
else if (idx >= 3*N*N + 5*N && idx < 3*N*N + 6*N) {
rf_s->sum_e_cv2[idx-(3*N*N + 5*N)] = rf_g->sum_e_cv2[idx-(3*N*N + 5*N)];
}
else if (idx >= 3*N*N + 6*N && idx < 3*N*N + 7*N) {
rf_s->sum_W[idx-(3*N*N + 6*N)] = rf_g->sum_W[idx-(3*N*N + 6*N)];
}
//Big indices load scalars
else if (idx == 3*N*N + 7*N) {
rf_s->trustworthy = rf_g->trustworthy;
}
else if (idx == 3*N*N + 7*N + 1) {
rf_s->beta0 = rf_g->beta0;
}
else if (idx == 3*N*N + 7*N + 2) {
rf_s->nReg = rf_g->nReg;
}
else if (idx == 3*N*N + 7*N + 3) {
rf_s->SSp = rf_g->SSp;
}
}
__device__ void compute_proj(int nR, float* s, float* xc, float* U, float* P) {
int i,j;
float dot;
float xu[N];
for (i = 0; i < N; i++) {
xu[i] = xc[i];
}
for (i = 0; i < nR - 1; i++) {
dot = 0;
for (j = 0; j < N; j++) {
dot += U[i*N + j]*xu[j];
}
s[i] = dot;
for (j = 0; j < N; j++) {
xu[j] -= s[i]*P[i*N + j];
}
}
dot = 0;
for (i = 0; i < N; i++) {
dot += U[(nR - 1)*N + i]*xu[i];
}
s[nR - 1] = dot;
}
__device__ void rf_predict(RF_Predict *rf, float* pred_helper, float* x, int index, int t) {
int i,j;
float xc[N];
for (i = 0; i < N; i++) {
xc[i] = x[i] - rf->c[i];
}
float dist = 0;
for (i = 0; i < N; i++) {
float dot = 0;
for (j = 0; j < N; j++) {
dot += rf->D[j*N + i]*xc[j];
}
dist += xc[i]*dot;
}
float w = __expf(-.5*dist);
float yp_n;
float sigma2;
if (w > .001 && rf->trustworthy) {
yp_n = rf->beta0;
sigma2 = 0.0;
for (i = 0; i < N; i++) {
xc[i] = x[i] - rf->mean_x[i];
}
int nR = rf->nReg;
if (rf->n_data[nR-1] <= 2*N) {
nR--;
}
float s[N];
compute_proj(nR, s, xc, rf->U, rf->P);
for (i = 0; i < nR; i++) {
yp_n += s[i]*rf->beta[i];
sigma2 += s[i]*s[i] / rf->SSs2[i];
}
sigma2 = rf->sum_e_cv2[nR-1]/(rf->sum_W[nR-1] - rf->SSp)*(1+w*sigma2);
pred_helper[0] = yp_n*w;
pred_helper[1] = w;
pred_helper[2] = w*yp_n*yp_n;
pred_helper[3] = w*sigma2;
}
else {
pred_helper[0] = 0;
pred_helper[1] = 0;
pred_helper[2] = 0;
pred_helper[3] = 0;
}
}
__device__ void compute_predict_conf(RF_Predict* rfs, float* x, int numRFS, float* vals, int t) {
int i;
float pred_helper[] = {0,0,0,0};
float sum_wy = 0;
float sum_w = 0;
float sum_wyy = 0;
float sum_conf = 0;
__shared__ RF_Predict rf_s0;
__shared__ RF_Predict rf_s1;
__shared__ RF_Predict rf_s2;
__shared__ RF_Predict rf_s3;
__shared__ RF_Predict rf_s4;
__shared__ RF_Predict rf_s5;
__shared__ RF_Predict rf_s6;
__shared__ RF_Predict rf_s7;
int tot_el = 3*N*N + 7*N + 4;
int idx = threadIdx.x*M + threadIdx.y;
for (i = 0; i < numRFS; i+= 7) {
__syncthreads();
if (idx < tot_el && i < numRFS) {
rf_to_shared_mem(&rf_s0, &rfs[i], idx);
}
else if (idx >= tot_el && idx < 2*tot_el && i + 1 < numRFS) {
rf_to_shared_mem(&rf_s1, &rfs[i+1], idx - tot_el);
}
else if (idx >= 2*tot_el && idx < 3*tot_el && i + 2 < numRFS) {
rf_to_shared_mem(&rf_s2, &rfs[i+2], idx - 2*tot_el);
}
else if (idx >= 3*tot_el && idx < 4*tot_el && i + 3 < numRFS) {
rf_to_shared_mem(&rf_s3, &rfs[i+3], idx - 3*tot_el);
}
else if (idx >= 4*tot_el && idx < 5*tot_el && i + 4 < numRFS) {
rf_to_shared_mem(&rf_s4, &rfs[i+4], idx - 4*tot_el);
}
else if (idx >= 5*tot_el && idx < 6*tot_el && i + 5 < numRFS) {
rf_to_shared_mem(&rf_s5, &rfs[i+5], idx - 5*tot_el);
}
else if (idx >= 6*tot_el && idx < 7*tot_el && i + 6 < numRFS) {
rf_to_shared_mem(&rf_s6, &rfs[i+6], idx - 6*tot_el);
}
else if (idx >= 7*tot_el && idx < 8*tot_el && i + 7 < numRFS) {
rf_to_shared_mem(&rf_s7, &rfs[i+7], idx - 7*tot_el);
}
__syncthreads();
rf_predict(&rf_s0, pred_helper, x, i, t);
sum_wy += pred_helper[0];
sum_w += pred_helper[1];
sum_wyy += pred_helper[2];
sum_conf += pred_helper[3];
if (i + 1 < numRFS) {
rf_predict(&rf_s1, pred_helper, x, i+1, t);
sum_wy += pred_helper[0];
sum_w += pred_helper[1];
sum_wyy += pred_helper[2];
sum_conf += pred_helper[3];
}
if (i + 2 < numRFS) {
rf_predict(&rf_s2, pred_helper, x, i+2, t);
sum_wy += pred_helper[0];
sum_w += pred_helper[1];
sum_wyy += pred_helper[2];
sum_conf += pred_helper[3];
}
if (i + 3 < numRFS) {
rf_predict(&rf_s3, pred_helper, x, i+3, t);
sum_wy += pred_helper[0];
sum_w += pred_helper[1];
sum_wyy += pred_helper[2];
sum_conf += pred_helper[3];
}
if (i + 4 < numRFS) {
rf_predict(&rf_s4, pred_helper, x, i+4, t);
sum_wy += pred_helper[0];
sum_w += pred_helper[1];
sum_wyy += pred_helper[2];
sum_conf += pred_helper[3];
}
if (i + 5 < numRFS) {
rf_predict(&rf_s5, pred_helper, x, i+5, t);
sum_wy += pred_helper[0];
sum_w += pred_helper[1];
sum_wyy += pred_helper[2];
sum_conf += pred_helper[3];
}
if (i + 6 < numRFS) {
rf_predict(&rf_s6, pred_helper, x, i+6, t);
sum_wy += pred_helper[0];
sum_w += pred_helper[1];
sum_wyy += pred_helper[2];
sum_conf += pred_helper[3];
}
if (i + 7 < numRFS) {
rf_predict(&rf_s7, pred_helper, x, i+7, t);
sum_wy += pred_helper[0];
sum_w += pred_helper[1];
sum_wyy += pred_helper[2];
sum_conf += pred_helper[3];
}
}
if (sum_w > 0) {
vals[0] = sum_wy/sum_w;
vals[1] = fmin((float)sqrt(fabs(sum_conf + sum_wyy - sum_wy*vals[0]))/sum_w, (float)MAX_VAR);
}
else {
vals[0] = 0;
vals[1] = MAX_VAR;
}
}
//Enforces constraints for the robot
__device__ void enforce_constraints(float* s) {
if (s[0] > 10.0) {
s[0] = 10.0;
}
else if (s[0] < -10.0) {
s[0] = -10.0;
}
if (s[1] > 10.0) {
s[1] = 10.0;
}
else if (s[1] < -10.0) {
s[1] = -10.0;
}
if (s[2] > 3.14) {
s[2] = -3.14;
}
else if (s[2] < -3.14) {
s[2] = 3.14;
}
if (s[3] > .5) {
s[3] = .5;
}
else if (s[3] < -.5) {
s[3] = -.5;
}
if (s[4] > .5) {
s[4] = .5;
}
else if (s[4] < -.5) {
s[4] = -.5;
}
}
__device__ void compute_dynamics(float* s, float* u, float* lwpr_input, RF_Predict* rfs1, RF_Predict* rfs2,
RF_Predict* rfs3, float* sigmas, int timestep, int numRFS1, int numRFS2, int numRFS3)
{
float dt = 1.0/(1.0*HZ);
//------Problem Specific------------
float vals[2];
//Normalize according to norm_in_d; all lwpr models share the same
//input and therefore the same norm_in.
lwpr_input[0] = s[0]/norm_in_d[0];
lwpr_input[1] = s[1]/norm_in_d[1];
lwpr_input[2] = s[2]/norm_in_d[2];
lwpr_input[3] = s[3]/norm_in_d[3];
lwpr_input[4] = s[4]/norm_in_d[4];
//Compute the first prediction
compute_predict_conf(rfs1, lwpr_input, numRFS1, vals, timestep);
s[0] += dt*(vals[0] + vals[1]*dm_d[T*DERIV_STATE_DIM*threadIdx.y + DERIV_STATE_DIM*timestep]);
sigmas[0] = vals[1];
//Compute second prediction
compute_predict_conf(rfs2, lwpr_input, numRFS2, vals, timestep);
s[1] += dt*(vals[0] + vals[1]*dm_d[T*DERIV_STATE_DIM*threadIdx.y + DERIV_STATE_DIM*timestep + 1]);
sigmas[1] = vals[1];
//Compute third prediction
compute_predict_conf(rfs3, lwpr_input, numRFS3, vals, timestep);
s[2] += dt*(vals[0] + vals[1]*dm_d[T*DERIV_STATE_DIM*threadIdx.y + DERIV_STATE_DIM*timestep + 2]);
sigmas[2] = vals[1];
//Low pass filter controls
s[3] += dt*((u[0] + u[1]) - s[3]);
s[4] += dt*((u[0] - u[1]) - s[4]);
//Make sure all constraints are satisfied
enforce_constraints(s);
}
//Computes the immediate cost according to the PI^2 framework.
//TODO: Add control cost and anti-biasing term.
__device__ float compute_cost(float* s, float* u, float* goal, float* sigmas)
{
float d1 = (s[0] - goal[0]);
float d2 = (s[1] - goal[1]);
float cost = d1*d1 + d2*d2;
return cost;
}
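//Simulates the K perturbed control rollouts: each x-thread handles one rollout
//(rollout 0 uses the unperturbed control sequence), each y-thread applies one
//of the M sampled model perturbations, and the instantaneous cost of every
//timestep is written to aug_state_costs_d.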
__global__ void rollout_kernel(float* aug_state_costs_d, float* state_d, float* goal_d, RF_Predict* rfs1,
RF_Predict* rfs2, RF_Predict* rfs3, float* du_d, float* vars_d,
int numRFS1, int numRFS2, int numRFS3)
{
int tdx = threadIdx.x;
int tdy = threadIdx.y;
int bdx = blockIdx.x;
if (blockDim.x*bdx+tdx < K) {
//Initialize the local state
float s[STATE_DIM];
float u[CONTROL_DIM];
float lwpr_input[N];
float vars[CONTROL_DIM];
float sigmas[DERIV_STATE_DIM];
int i,j;
//Load the initial state
for (i = 0; i < STATE_DIM; i++) {
s[i] = state_d[i];
}
//Load vars
for (i = 0; i < CONTROL_DIM; i++) {
vars[i] = vars_d[i];
}
for (i = 0; i < T; i++) {
//Start the main program loop
for (j = 0; j < CONTROL_DIM; j++) {
if (bdx == 0 && tdx == 0) {
u[j] = U_d[i*CONTROL_DIM + j];
}
else {
u[j] = U_d[i*CONTROL_DIM + j] + du_d[CONTROL_DIM*T*(blockDim.x*bdx + tdx) + i*CONTROL_DIM + j]*vars[j];
}
}
//Check to see if the control commands are allowable
compute_dynamics(s, u, lwpr_input, rfs1, rfs2, rfs3, sigmas, i, numRFS1, numRFS2, numRFS3);
float inst_cost = compute_cost(s,u,goal_d, sigmas);
aug_state_costs_d[M*T*((blockDim.x)*bdx + tdx) + T*tdy + i] = inst_cost;
}
}
}
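//Averages the M model-perturbation cost samples for each rollout and timestep,
//producing the expected cost trajectory state_costs_d[k*T + t].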
__global__ void expec_costs_kernel(float* state_costs_d, float* aug_state_costs_d)
{
int tdx = threadIdx.x;
int bdx = blockIdx.x;
float expec_cost = 0;
int i;
if (tdx < T && bdx < K) {
for (i = 0; i < M; i++) {
expec_cost += aug_state_costs_d[M*T*bdx + T*i + tdx];
}
state_costs_d[T*bdx + tdx] = expec_cost/(1.0*M);
}
}
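//Converts each rollout's costs into exponentiated weights: the cost-to-go at
//time t is accumulated, normalized by the noise-free rollout's cost-to-go
//(rollout 0), and mapped through exp(-10 * normalized cost-to-go). Note that
//rollout 0 overwrites these baseline entries in place during the same pass,
//so reads of the noise-free values by other rollouts race with that update.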
__global__ void norm_exp_costs_kernel(float* state_costs_d)
{
int tdx = threadIdx.x;
int bdx = blockIdx.x;
int index = blockDim.x*bdx + tdx;
if (index < K) {
float cost2go = 0;
float nf_normal = 0;
int i;
for (i = T-1; i >= 0; i--) {
cost2go += state_costs_d[T*index + i];
nf_normal += state_costs_d[i];
state_costs_d[T*index + i] = __expf(-10.0*cost2go/nf_normal);
}
}
}
//=========================================================================================
//--------------------------------END CUDA------------------------------------------------
//========================================================================================
void compute_control(float* state, float* U, float* goal, LWPR_Model model1, LWPR_Model model2,
LWPR_Model model3, float* vars, hiprandGenerator_t gen) {
//Timing Code
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
//First we create du_d, perturbations of U which reside in device memory.
float* du_d;
HANDLE_ERROR( hipMalloc((void**)&du_d, K*T*CONTROL_DIM*sizeof(float)));
hiprandGenerateNormal(gen, du_d, K*T*CONTROL_DIM, 0.0, 1.0);
//Next we create dm_d perturbations of the LWPR model in device memory
float* dm_temp;
HANDLE_ERROR( hipMalloc((void**)&dm_temp, M*T*DERIV_STATE_DIM*sizeof(float)));
hiprandGenerateNormal(gen, dm_temp, M*T*DERIV_STATE_DIM, 0.0, 1.0);
HANDLE_ERROR( hipMemcpyToSymbol(dm_d, dm_temp, M*T*DERIV_STATE_DIM*sizeof(float), 0, hipMemcpyDeviceToDevice));
hipFree(dm_temp);
//Create pointers for state, U, goal, rfs1, rfs2, and vars in device memory
float* state_d;
float* goal_d;
float* vars_d;
//Transfer relevant data from host LWPR model to device LWPR Receptive Field
int i,j;
RF_Predict* rfs1;
RF_Predict* rfs2;
RF_Predict* rfs3;
rfs1 = (RF_Predict*)malloc(model1.sub[0].numRFS*sizeof(RF_Predict));
rfs2 = (RF_Predict*)malloc(model2.sub[0].numRFS*sizeof(RF_Predict));
rfs3 = (RF_Predict*)malloc(model3.sub[0].numRFS*sizeof(RF_Predict));
for (i = 0; i < model1.sub[0].numRFS; i++) {
rfTransfer(model1.sub[0].rf[i], &rfs1[i], model1.nInStore);
}
for (i = 0; i < model2.sub[0].numRFS; i++) {
rfTransfer(model2.sub[0].rf[i], &rfs2[i], model2.nInStore);
}
for (i = 0; i < model3.sub[0].numRFS; i++) {
rfTransfer(model3.sub[0].rf[i], &rfs3[i], model3.nInStore);
}
//Transfer norms to float arrays
float norm_in[N];
for (i = 0; i < N; i++) {
norm_in[i] = float(model1.norm_in[i]);
}
//Create device pointers for rfs1, rfs2, norm_in1, and norm_in2
RF_Predict* rfs1_d;
RF_Predict* rfs2_d;
RF_Predict* rfs3_d;
//Allocate space for state, U, goal, rfs1, rfs2, and vars in device memory
HANDLE_ERROR( hipMalloc((void**)&state_d, STATE_DIM*sizeof(float)));
HANDLE_ERROR( hipMalloc((void**)&goal_d, STATE_DIM*sizeof(float)));
HANDLE_ERROR( hipMalloc((void**)&vars_d, CONTROL_DIM*sizeof(float)));
HANDLE_ERROR( hipMalloc((void**)&rfs1_d, model1.sub[0].numRFS*sizeof(RF_Predict)));
HANDLE_ERROR( hipMalloc((void**)&rfs2_d, model2.sub[0].numRFS*sizeof(RF_Predict)));
HANDLE_ERROR( hipMalloc((void**)&rfs3_d, model3.sub[0].numRFS*sizeof(RF_Predict)));
//Copy state, U, goal, model1, and model2 into device memory
HANDLE_ERROR( hipMemcpy(state_d, state, STATE_DIM*sizeof(float), hipMemcpyHostToDevice));
HANDLE_ERROR( hipMemcpyToSymbol(U_d, U, CONTROL_DIM*T*sizeof(float), 0, hipMemcpyHostToDevice));
HANDLE_ERROR( hipMemcpy(goal_d, goal, STATE_DIM*sizeof(float), hipMemcpyHostToDevice));
HANDLE_ERROR( hipMemcpy(vars_d, vars, CONTROL_DIM*sizeof(float), hipMemcpyHostToDevice));
HANDLE_ERROR( hipMemcpy(rfs1_d, rfs1, model1.sub[0].numRFS*sizeof(RF_Predict), hipMemcpyHostToDevice));
HANDLE_ERROR( hipMemcpy(rfs2_d, rfs2, model2.sub[0].numRFS*sizeof(RF_Predict), hipMemcpyHostToDevice));
HANDLE_ERROR( hipMemcpy(rfs3_d, rfs3, model3.sub[0].numRFS*sizeof(RF_Predict), hipMemcpyHostToDevice));
HANDLE_ERROR( hipMemcpyToSymbol(norm_in_d, norm_in, N*sizeof(float), 0, hipMemcpyHostToDevice));
//Allocate space for the state costs and new controls
//For the raw state costs
float* aug_state_costs_d;
HANDLE_ERROR( hipMalloc((void**)&aug_state_costs_d, T*K*M*sizeof(float)));
//For the averaged state costs
float* state_costs_d;
//For controls we just re-use du_d
HANDLE_ERROR( hipMalloc((void**)&state_costs_d, T*K*sizeof(float)));
//Now we set the grid and block size
int xBlockSize = (BLOCKSIZE-1)/M + 1;
int yBlockSize = M;
int xGridSize = (K-1)/xBlockSize + 1;
dim3 dimBlock1(xBlockSize, yBlockSize, 1);
dim3 dimGrid1(xGridSize, 1, 1);
hipEventRecord(start, 0);
//Now we launch the kernel to compute the new control
hipLaunchKernelGGL(( rollout_kernel), dim3(dimGrid1), dim3(dimBlock1), 0, 0, aug_state_costs_d, state_d, goal_d, rfs1_d, rfs2_d, rfs3_d, du_d, vars_d, model1.sub[0].numRFS, model2.sub[0].numRFS, model3.sub[0].numRFS);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipDeviceSynchronize();
//Wait until the kernel has finished
dim3 dimBlock2(T, 1, 1);
dim3 dimGrid2(K, 1, 1);
//Compute expectation of the costs
hipLaunchKernelGGL(( expec_costs_kernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, state_costs_d, aug_state_costs_d);
hipDeviceSynchronize();
dim3 dimBlock3(64, 1, 1);
dim3 dimGrid3((K-1)/64 + 1, 1, 1);
//Now we normalize the cost-to-go by the noise free path, and exponentiate by the -lambda*cost2go
hipLaunchKernelGGL(( norm_exp_costs_kernel), dim3(dimGrid3), dim3(dimBlock3), 0, 0, state_costs_d);
hipDeviceSynchronize();
//Compute the normalizer
//For now just do it on the CPU
//Transfer state costs to host memory
float* state_costs;
state_costs = (float*)malloc(T*K*sizeof(float));
HANDLE_ERROR( hipMemcpy(state_costs, state_costs_d, T*K*sizeof(float), hipMemcpyDeviceToHost));
//Now compute the normalizer
float* normalizer;
normalizer = (float*)malloc(T*sizeof(float));
for (i = 0; i < T; i++) {
normalizer[i] = 0;
for (j = 0; j < K; j++) {
normalizer[i] += state_costs[T*j + i];
}
}
//Compute the new controls
//Just do on CPU for now
//First transfer controls to host memory
float* du;
du = (float*)malloc(T*K*CONTROL_DIM*sizeof(float));
HANDLE_ERROR( hipMemcpy(du, du_d, T*K*CONTROL_DIM*sizeof(float), hipMemcpyDeviceToHost));
//Now compute the new control and place it in U
float* U_new;
U_new = (float*)malloc(T*CONTROL_DIM*sizeof(float));
for (i = 0; i < T; i++) {
U_new[CONTROL_DIM*i] = (state_costs[i]/normalizer[i])*U[CONTROL_DIM*i];
U_new[CONTROL_DIM*i + 1] = (state_costs[i]/normalizer[i])*U[CONTROL_DIM*i + 1];
for (j = 0; j < K; j++) {
float u1 = U[CONTROL_DIM*i] + du[T*CONTROL_DIM*j + CONTROL_DIM*i]*vars[0];
float u2 = U[CONTROL_DIM*i + 1] + du[T*CONTROL_DIM*j + CONTROL_DIM*i + 1]*vars[1];
float u_max = .5;
float u_min = -.5;
u1 = fmin(u1, u_max);
u1 = fmax(u1, u_min);
u2 = fmin(u2, u_max);
u2 = fmax(u2, u_min);
U_new[CONTROL_DIM*i] += (state_costs[T*j + i]/normalizer[i])*u1;
U_new[CONTROL_DIM*i + 1] += (state_costs[T*j + i]/normalizer[i])*u2;
}
U[i*CONTROL_DIM] = U_new[i*CONTROL_DIM];
U[i*CONTROL_DIM + 1] = U_new[i*CONTROL_DIM + 1];
}
//Free device arrays
hipFree(state_d);
hipFree(goal_d);
hipFree(rfs1_d);
hipFree(rfs2_d);
hipFree(rfs3_d);
hipFree(du_d);
hipFree(state_costs_d);
hipFree(aug_state_costs_d);
hipFree(vars_d);
//Free host arrays
free(rfs1);
free(rfs2);
free(rfs3);
free(state_costs);
free(du);
free(normalizer);
//Print timing results
hipEventElapsedTime(&time, start, stop);
printf("Kernel Time: %f ms \n", time);
}
void dynamics(float* s, float* u, float dt) {
s[0] += dt*(s[3] + 1.1*s[4])/2.0*cos(s[2]);
s[1] += dt*(s[3] + 1.1*s[4])/2.0*sin(s[2]);
s[2] += dt*(s[3] - 1.1*s[4])/.258;
s[3] += dt*((u[0] + u[1]) - s[3]);
s[4] += dt*((u[0] - u[1]) - s[4]);
if (s[0] > 10.0) {
s[0] = 10.0;
}
else if (s[0] < -10.0) {
s[0] = -10.0;
}
if (s[1] > 10.0) {
s[1] = 10.0;
}
else if (s[1] < -10.0) {
s[1] = -10.0;
}
if (s[2] > 3.14) {
s[2] = -3.14;
}
else if (s[2] < -3.14) {
s[2] = 3.14;
}
if (s[3] > .5) {
s[3] = .5;
}
else if (s[3] < -.5) {
s[3] = -.5;
}
if (s[4] > .5) {
s[4] = .5;
}
else if (s[4] < -.5) {
s[4] = -.5;
}
}
int main() {
LWPR_Model model1;
LWPR_Model model2;
LWPR_Model model3;
char x_dot[] = {'t', 'r', 'a', 'j', '_', 'x', '.', 'x', 'm', 'l', '\0'};
char y_dot[] = {'t', 'r', 'a', 'j', '_', 'y', '.', 'x', 'm', 'l', '\0'};
char theta_dot[] = {'t', 'h', 'e', 't', 'a', '.', 'x', 'm', 'l', '\0'};
int e1[] = {-3};
int e2[] = {-3};
int e3[] = {-3};
lwpr_init_model(&model1, 5, 1, "x");
lwpr_init_model(&model2, 5, 1, "y");
lwpr_init_model(&model3, 5, 1, "theta");
//lwpr_read_xml(&model1, x_dot, e1);
//lwpr_read_xml(&model2, y_dot, e2);
//lwpr_read_xml(&model3, theta_dot, e3);
printf("%d %d %d", e1[0], e2[0], e3[0]);
float U[T*CONTROL_DIM] = {0};
float u[CONTROL_DIM] = {0};
float s[STATE_DIM] = {0};
float goal[] = {2.0, 2.0, 0, 0, 0};
float vars[] = {.50, .25};
hiprandGenerator_t gen;
float dt = (1.0)/(1.0*HZ);
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
int i,j;
for (j = 0; j < 2500; j++) {
compute_control(s, U, goal, model1, model2, model3, vars, gen);
u[0] = U[0];
u[1] = U[1];
for (i = 0; i < (T-1)*CONTROL_DIM; i++) {
U[i] = U[i+CONTROL_DIM];
}
//Zero out the last control pair after shifting the horizon
U[(T-1)*CONTROL_DIM] = 0;
U[(T-1)*CONTROL_DIM + 1] = 0;
double lwpr_input[5] = {s[0], s[1], s[2], s[3], s[4]};
double out1 = s[0];
double out2 = s[1];
double out3 = s[2];
dynamics(s, u, dt);
out1 = (s[0] - out1)/dt;
out2 = (s[1] - out2)/dt;
out3 = (s[2] - out3)/dt;
lwpr_update(&model1, lwpr_input, &out1, NULL, NULL);
lwpr_update(&model2, lwpr_input, &out2, NULL, NULL);
lwpr_update(&model3, lwpr_input, &out3, NULL, NULL);
printf("Current Location: (%f, %f, %f, %f, %f,) \n", s[0], s[1], s[2], s[3], s[4]);
}
//Save the LWPR models
char xn_dot[] = {'x', 'n', '.', 'x', 'm', 'l', '\0'};
char yn_dot[] = {'y', 'n', '.', 'x', 'm', 'l', '\0'};
char thetan_dot[] = {'t', 'h', 'e', 't', 'a', 'n', '.', 'x', 'm', 'l', '\0'};
lwpr_write_xml(&model1, xn_dot);
lwpr_write_xml(&model2, yn_dot);
lwpr_write_xml(&model3, thetan_dot);
}
| ac4569ad98c0a89e96b72904568bfefd92375705.cu | #include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <curand.h>
#include <lwpr/lwpr.h>
#include <math.h>
#include <lwpr/lwpr_xml.h>
#define CONTROL_DIM 2
#define STATE_DIM 5
#define DERIV_STATE_DIM 3
//N is the number of states in an LWPR model, in this case 5
#define N 5
#define K 1000
#define M 16
#define T 60
#define HZ 20
#define MAX_VAR 10.0
#define BLOCKSIZE 1024
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//Define a data structure which contains the elements
//of an LWPR receptive field needed to make a prediction.
typedef struct {
float c[N];
float D[N*N];
int trustworthy;
float beta0;
float mean_x[N];
int nReg;
float n_data[N];
float U[N*N];
float P[N*N];
float beta[N];
float SSs2[N];
float sum_e_cv2[N];
float sum_W[N];
float SSp;
} RF_Predict;
//Transfers data from a full receptive field to a (smaller) rfPredict struct
void rfTransfer(LWPR_ReceptiveField *rf_orig, RF_Predict *rf_pred, int nInS) {
int i,j;
int R = rf_orig->nReg;
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++){
rf_pred->D[i*N + j] = float(rf_orig->D[nInS*i + j]);
}
}
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
if (i < R) {
rf_pred->U[i*N + j] = float(rf_orig->U[i*nInS + j]);
rf_pred->P[i*N + j] = float(rf_orig->P[i*nInS + j]);
}
else {
//Pad the unused part of the array with zeros so the device never reads uninitialized values
rf_pred->U[i*N + j] = 0;
rf_pred->P[i*N + j] = 0;
}
}
}
for (i = 0; i < N; i++) {
rf_pred->c[i] = float(rf_orig->c[i]);
rf_pred->mean_x[i] = float(rf_orig->mean_x[i]);
}
for (i = 0; i < R; i++) {
rf_pred->n_data[i] = float(rf_orig->n_data[i]);
rf_pred->beta[i] = float(rf_orig->beta[i]);
rf_pred->SSs2[i] = float(rf_orig->SSs2[i]);
rf_pred->sum_e_cv2[i] = float(rf_orig->sum_e_cv2[i]);
rf_pred->sum_W[i] = float(rf_orig->sum_w[i]);
}
for (i = R; i < N; i++) {
rf_pred->n_data[i] = 0;
rf_pred->beta[i] = 0;
rf_pred->SSs2[i] = 0;
rf_pred->sum_e_cv2[i] = 0;
rf_pred->sum_W[i] = 0;
}
rf_pred->trustworthy = rf_orig->trustworthy;
rf_pred->beta0 = float(rf_orig->beta0);
rf_pred->nReg = rf_orig->nReg;
rf_pred->SSp = float(rf_orig->SSp);
}
//==============================================================================
//----------------------------CUDA FUNCTIONS------------------------------------
//==============================================================================
__constant__ float U_d[T*CONTROL_DIM];
__constant__ float dm_d[T*M*DERIV_STATE_DIM];
__constant__ float norm_in_d[N];
__device__ void print_vec(float* A, float* B, int n) {
printf("\n\n++++++++++++++++++++++++++++++++++++++++++");
printf("\n Printing A \n");
for (int i = 0; i < n; i++) {
printf(" %f ", A[i]);
}
printf("- \n \n -");
printf("\n Printing B \n");
for (int i = 0; i < n; i++) {
printf(" %f ", B[i]);
}
printf("=================================\n");
}
__device__ void rf_to_shared_mem(RF_Predict *rf_s, RF_Predict *rf_g, int idx) {
//Smaller indices load arrays
if (idx < N*N) {
rf_s->D[idx] = rf_g->D[idx];
}
else if (idx >= N*N && idx < 2*N*N) {
rf_s->U[idx-N*N] = rf_g->U[idx-N*N];
}
else if (idx >= 2*N*N && idx < 3*N*N) {
rf_s->P[idx-2*N*N] = rf_g->P[idx-2*N*N];
}
//Intermediate indices load vectors
else if (idx >= 3*N*N && idx < 3*N*N + N) {
rf_s->c[idx-3*N*N] = rf_g->c[idx-3*N*N];
}
else if (idx >= 3*N*N + N && idx < 3*N*N + 2*N) {
rf_s->mean_x[idx-(3*N*N + N)] = rf_g->mean_x[idx-(3*N*N + N)];
}
else if (idx >= 3*N*N + 2*N && idx < 3*N*N + 3*N) {
rf_s->n_data[idx-(3*N*N + 2*N)] = rf_g->n_data[idx-(3*N*N + 2*N)];
}
else if (idx >= 3*N*N + 3*N && idx < 3*N*N + 4*N) {
rf_s->beta[idx-(3*N*N + 3*N)] = rf_g->beta[idx-(3*N*N + 3*N)];
}
else if (idx >= 3*N*N + 4*N && idx < 3*N*N + 5*N) {
rf_s->SSs2[idx-(3*N*N + 4*N)] = rf_g->SSs2[idx-(3*N*N + 4*N)];
}
else if (idx >= 3*N*N + 5*N && idx < 3*N*N + 6*N) {
rf_s->sum_e_cv2[idx-(3*N*N + 5*N)] = rf_g->sum_e_cv2[idx-(3*N*N + 5*N)];
}
else if (idx >= 3*N*N + 6*N && idx < 3*N*N + 7*N) {
rf_s->sum_W[idx-(3*N*N + 6*N)] = rf_g->sum_W[idx-(3*N*N + 6*N)];
}
//Big indices load scalars
else if (idx == 3*N*N + 7*N) {
rf_s->trustworthy = rf_g->trustworthy;
}
else if (idx == 3*N*N + 7*N + 1) {
rf_s->beta0 = rf_g->beta0;
}
else if (idx == 3*N*N + 7*N + 2) {
rf_s->nReg = rf_g->nReg;
}
else if (idx == 3*N*N + 7*N + 3) {
rf_s->SSp = rf_g->SSp;
}
}
__device__ void compute_proj(int nR, float* s, float* xc, float* U, float* P) {
int i,j;
float dot;
float xu[N];
for (i = 0; i < N; i++) {
xu[i] = xc[i];
}
for (i = 0; i < nR - 1; i++) {
dot = 0;
for (j = 0; j < N; j++) {
dot += U[i*N + j]*xu[j];
}
s[i] = dot;
for (j = 0; j < N; j++) {
xu[j] -= s[i]*P[i*N + j];
}
}
dot = 0;
for (i = 0; i < N; i++) {
dot += U[(nR - 1)*N + i]*xu[i];
}
s[nR - 1] = dot;
}
__device__ void rf_predict(RF_Predict *rf, float* pred_helper, float* x, int index, int t) {
int i,j;
float xc[N];
for (i = 0; i < N; i++) {
xc[i] = x[i] - rf->c[i];
}
float dist = 0;
for (i = 0; i < N; i++) {
float dot = 0;
for (j = 0; j < N; j++) {
dot += rf->D[j*N + i]*xc[j];
}
dist += xc[i]*dot;
}
float w = __expf(-.5*dist);
float yp_n;
float sigma2;
if (w > .001 && rf->trustworthy) {
yp_n = rf->beta0;
sigma2 = 0.0;
for (i = 0; i < N; i++) {
xc[i] = x[i] - rf->mean_x[i];
}
int nR = rf->nReg;
if (rf->n_data[nR-1] <= 2*N) {
nR--;
}
float s[N];
compute_proj(nR, s, xc, rf->U, rf->P);
for (i = 0; i < nR; i++) {
yp_n += s[i]*rf->beta[i];
sigma2 += s[i]*s[i] / rf->SSs2[i];
}
sigma2 = rf->sum_e_cv2[nR-1]/(rf->sum_W[nR-1] - rf->SSp)*(1+w*sigma2);
pred_helper[0] = yp_n*w;
pred_helper[1] = w;
pred_helper[2] = w*yp_n*yp_n;
pred_helper[3] = w*sigma2;
}
else {
pred_helper[0] = 0;
pred_helper[1] = 0;
pred_helper[2] = 0;
pred_helper[3] = 0;
}
}
__device__ void compute_predict_conf(RF_Predict* rfs, float* x, int numRFS, float* vals, int t) {
int i;
float pred_helper[] = {0,0,0,0};
float sum_wy = 0;
float sum_w = 0;
float sum_wyy = 0;
float sum_conf = 0;
__shared__ RF_Predict rf_s0;
__shared__ RF_Predict rf_s1;
__shared__ RF_Predict rf_s2;
__shared__ RF_Predict rf_s3;
__shared__ RF_Predict rf_s4;
__shared__ RF_Predict rf_s5;
__shared__ RF_Predict rf_s6;
__shared__ RF_Predict rf_s7;
int tot_el = 3*N*N + 7*N + 4;
int idx = threadIdx.x*M + threadIdx.y;
for (i = 0; i < numRFS; i+= 7) {
__syncthreads();
if (idx < tot_el && i < numRFS) {
rf_to_shared_mem(&rf_s0, &rfs[i], idx);
}
else if (idx >= tot_el && idx < 2*tot_el && i + 1 < numRFS) {
rf_to_shared_mem(&rf_s1, &rfs[i+1], idx - tot_el);
}
else if (idx >= 2*tot_el && idx < 3*tot_el && i + 2 < numRFS) {
rf_to_shared_mem(&rf_s2, &rfs[i+2], idx - 2*tot_el);
}
else if (idx >= 3*tot_el && idx < 4*tot_el && i + 3 < numRFS) {
rf_to_shared_mem(&rf_s3, &rfs[i+3], idx - 3*tot_el);
}
else if (idx >= 4*tot_el && idx < 5*tot_el && i + 4 < numRFS) {
rf_to_shared_mem(&rf_s4, &rfs[i+4], idx - 4*tot_el);
}
else if (idx >= 5*tot_el && idx < 6*tot_el && i + 5 < numRFS) {
rf_to_shared_mem(&rf_s5, &rfs[i+5], idx - 5*tot_el);
}
else if (idx >= 6*tot_el && idx < 7*tot_el && i + 6 < numRFS) {
rf_to_shared_mem(&rf_s6, &rfs[i+6], idx - 6*tot_el);
}
else if (idx >= 7*tot_el && idx < 8*tot_el && i + 7 < numRFS) {
rf_to_shared_mem(&rf_s7, &rfs[i+7], idx - 7*tot_el);
}
__syncthreads();
rf_predict(&rf_s0, pred_helper, x, i, t);
sum_wy += pred_helper[0];
sum_w += pred_helper[1];
sum_wyy += pred_helper[2];
sum_conf += pred_helper[3];
if (i + 1 < numRFS) {
rf_predict(&rf_s1, pred_helper, x, i+1, t);
sum_wy += pred_helper[0];
sum_w += pred_helper[1];
sum_wyy += pred_helper[2];
sum_conf += pred_helper[3];
}
if (i + 2 < numRFS) {
rf_predict(&rf_s2, pred_helper, x, i+2, t);
sum_wy += pred_helper[0];
sum_w += pred_helper[1];
sum_wyy += pred_helper[2];
sum_conf += pred_helper[3];
}
if (i + 3 < numRFS) {
rf_predict(&rf_s3, pred_helper, x, i+3, t);
sum_wy += pred_helper[0];
sum_w += pred_helper[1];
sum_wyy += pred_helper[2];
sum_conf += pred_helper[3];
}
if (i + 4 < numRFS) {
rf_predict(&rf_s4, pred_helper, x, i+4, t);
sum_wy += pred_helper[0];
sum_w += pred_helper[1];
sum_wyy += pred_helper[2];
sum_conf += pred_helper[3];
}
if (i + 5 < numRFS) {
rf_predict(&rf_s5, pred_helper, x, i+5, t);
sum_wy += pred_helper[0];
sum_w += pred_helper[1];
sum_wyy += pred_helper[2];
sum_conf += pred_helper[3];
}
if (i + 6 < numRFS) {
rf_predict(&rf_s6, pred_helper, x, i+6, t);
sum_wy += pred_helper[0];
sum_w += pred_helper[1];
sum_wyy += pred_helper[2];
sum_conf += pred_helper[3];
}
if (i + 7 < numRFS) {
rf_predict(&rf_s7, pred_helper, x, i+7, t);
sum_wy += pred_helper[0];
sum_w += pred_helper[1];
sum_wyy += pred_helper[2];
sum_conf += pred_helper[3];
}
}
if (sum_w > 0) {
vals[0] = sum_wy/sum_w;
vals[1] = fmin((float)sqrt(fabs(sum_conf + sum_wyy - sum_wy*vals[0]))/sum_w, (float)MAX_VAR);
}
else {
vals[0] = 0;
vals[1] = MAX_VAR;
}
}
//Enforces constraints for the robot
__device__ void enforce_constraints(float* s) {
if (s[0] > 10.0) {
s[0] = 10.0;
}
else if (s[0] < -10.0) {
s[0] = -10.0;
}
if (s[1] > 10.0) {
s[1] = 10.0;
}
else if (s[1] < -10.0) {
s[1] = -10.0;
}
if (s[2] > 3.14) {
s[2] = -3.14;
}
else if (s[2] < -3.14) {
s[2] = 3.14;
}
if (s[3] > .5) {
s[3] = .5;
}
else if (s[3] < -.5) {
s[3] = -.5;
}
if (s[4] > .5) {
s[4] = .5;
}
else if (s[4] < -.5) {
s[4] = -.5;
}
}
__device__ void compute_dynamics(float* s, float* u, float* lwpr_input, RF_Predict* rfs1, RF_Predict* rfs2,
RF_Predict* rfs3, float* sigmas, int timestep, int numRFS1, int numRFS2, int numRFS3)
{
float dt = 1.0/(1.0*HZ);
//------Problem Specific------------
float vals[2];
//Normalize according to norm_in_d; all lwpr models share the same
//input and therefore the same norm_in.
lwpr_input[0] = s[0]/norm_in_d[0];
lwpr_input[1] = s[1]/norm_in_d[1];
lwpr_input[2] = s[2]/norm_in_d[2];
lwpr_input[3] = s[3]/norm_in_d[3];
lwpr_input[4] = s[4]/norm_in_d[4];
//Compute the first prediction
compute_predict_conf(rfs1, lwpr_input, numRFS1, vals, timestep);
s[0] += dt*(vals[0] + vals[1]*dm_d[T*DERIV_STATE_DIM*threadIdx.y + DERIV_STATE_DIM*timestep]);
sigmas[0] = vals[1];
//Compute second prediction
compute_predict_conf(rfs2, lwpr_input, numRFS2, vals, timestep);
s[1] += dt*(vals[0] + vals[1]*dm_d[T*DERIV_STATE_DIM*threadIdx.y + DERIV_STATE_DIM*timestep + 1]);
sigmas[1] = vals[1];
//Compute third prediction
compute_predict_conf(rfs3, lwpr_input, numRFS3, vals, timestep);
s[2] += dt*(vals[0] + vals[1]*dm_d[T*DERIV_STATE_DIM*threadIdx.y + DERIV_STATE_DIM*timestep + 2]);
sigmas[2] = vals[1];
//Low pass filter controls
s[3] += dt*((u[0] + u[1]) - s[3]);
s[4] += dt*((u[0] - u[1]) - s[4]);
//Make sure all constraints are satisfied
enforce_constraints(s);
}
//Computes the immediate cost according to the PI^2 framework.
//TODO: Add control cost and anti-biasing term.
__device__ float compute_cost(float* s, float* u, float* goal, float* sigmas)
{
float d1 = (s[0] - goal[0]);
float d2 = (s[1] - goal[1]);
float cost = d1*d1 + d2*d2;
return cost;
}
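//Simulates the K perturbed control rollouts: each x-thread handles one rollout
//(rollout 0 uses the unperturbed control sequence), each y-thread applies one
//of the M sampled model perturbations, and the instantaneous cost of every
//timestep is written to aug_state_costs_d.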
__global__ void rollout_kernel(float* aug_state_costs_d, float* state_d, float* goal_d, RF_Predict* rfs1,
RF_Predict* rfs2, RF_Predict* rfs3, float* du_d, float* vars_d,
int numRFS1, int numRFS2, int numRFS3)
{
int tdx = threadIdx.x;
int tdy = threadIdx.y;
int bdx = blockIdx.x;
if (blockDim.x*bdx+tdx < K) {
//Initialize the local state
float s[STATE_DIM];
float u[CONTROL_DIM];
float lwpr_input[N];
float vars[CONTROL_DIM];
float sigmas[DERIV_STATE_DIM];
int i,j;
//Load the initial state
for (i = 0; i < STATE_DIM; i++) {
s[i] = state_d[i];
}
//Load vars
for (i = 0; i < CONTROL_DIM; i++) {
vars[i] = vars_d[i];
}
for (i = 0; i < T; i++) {
//Start the main program loop
for (j = 0; j < CONTROL_DIM; j++) {
if (bdx == 0 && tdx == 0) {
u[j] = U_d[i*CONTROL_DIM + j];
}
else {
u[j] = U_d[i*CONTROL_DIM + j] + du_d[CONTROL_DIM*T*(blockDim.x*bdx + tdx) + i*CONTROL_DIM + j]*vars[j];
}
}
//Check to see if the control commands are allowable
compute_dynamics(s, u, lwpr_input, rfs1, rfs2, rfs3, sigmas, i, numRFS1, numRFS2, numRFS3);
float inst_cost = compute_cost(s,u,goal_d, sigmas);
aug_state_costs_d[M*T*((blockDim.x)*bdx + tdx) + T*tdy + i] = inst_cost;
}
}
}
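//Averages the M model-perturbation cost samples for each rollout and timestep,
//producing the expected cost trajectory state_costs_d[k*T + t].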
__global__ void expec_costs_kernel(float* state_costs_d, float* aug_state_costs_d)
{
int tdx = threadIdx.x;
int bdx = blockIdx.x;
float expec_cost = 0;
int i;
if (tdx < T && bdx < K) {
for (i = 0; i < M; i++) {
expec_cost += aug_state_costs_d[M*T*bdx + T*i + tdx];
}
state_costs_d[T*bdx + tdx] = expec_cost/(1.0*M);
}
}
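//Converts each rollout's costs into exponentiated weights: the cost-to-go at
//time t is accumulated, normalized by the noise-free rollout's cost-to-go
//(rollout 0), and mapped through exp(-10 * normalized cost-to-go). Note that
//rollout 0 overwrites these baseline entries in place during the same pass,
//so reads of the noise-free values by other rollouts race with that update.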
__global__ void norm_exp_costs_kernel(float* state_costs_d)
{
int tdx = threadIdx.x;
int bdx = blockIdx.x;
int index = blockDim.x*bdx + tdx;
if (index < K) {
float cost2go = 0;
float nf_normal = 0;
int i;
for (i = T-1; i >= 0; i--) {
cost2go += state_costs_d[T*index + i];
nf_normal += state_costs_d[i];
state_costs_d[T*index + i] = __expf(-10.0*cost2go/nf_normal);
}
}
}
//=========================================================================================
//--------------------------------END CUDA------------------------------------------------
//========================================================================================
void compute_control(float* state, float* U, float* goal, LWPR_Model model1, LWPR_Model model2,
LWPR_Model model3, float* vars, curandGenerator_t gen) {
//Timing Code
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//First we create du_d, perturbations of U which reside in device memory.
float* du_d;
HANDLE_ERROR( cudaMalloc((void**)&du_d, K*T*CONTROL_DIM*sizeof(float)));
curandGenerateNormal(gen, du_d, K*T*CONTROL_DIM, 0.0, 1.0);
//Next we create dm_d perturbations of the LWPR model in device memory
float* dm_temp;
HANDLE_ERROR( cudaMalloc((void**)&dm_temp, M*T*DERIV_STATE_DIM*sizeof(float)));
curandGenerateNormal(gen, dm_temp, M*T*DERIV_STATE_DIM, 0.0, 1.0);
HANDLE_ERROR( cudaMemcpyToSymbol(dm_d, dm_temp, M*T*DERIV_STATE_DIM*sizeof(float), 0, cudaMemcpyDeviceToDevice));
cudaFree(dm_temp);
//Create pointers for state, U, goal, rfs1, rfs2, and vars in device memory
float* state_d;
float* goal_d;
float* vars_d;
//Transfer relevant data from host LWPR model to device LWPR Receptive Field
int i,j;
RF_Predict* rfs1;
RF_Predict* rfs2;
RF_Predict* rfs3;
rfs1 = (RF_Predict*)malloc(model1.sub[0].numRFS*sizeof(RF_Predict));
rfs2 = (RF_Predict*)malloc(model2.sub[0].numRFS*sizeof(RF_Predict));
rfs3 = (RF_Predict*)malloc(model3.sub[0].numRFS*sizeof(RF_Predict));
for (i = 0; i < model1.sub[0].numRFS; i++) {
rfTransfer(model1.sub[0].rf[i], &rfs1[i], model1.nInStore);
}
for (i = 0; i < model2.sub[0].numRFS; i++) {
rfTransfer(model2.sub[0].rf[i], &rfs2[i], model2.nInStore);
}
for (i = 0; i < model3.sub[0].numRFS; i++) {
rfTransfer(model3.sub[0].rf[i], &rfs3[i], model3.nInStore);
}
//Transfer norms to float arrays
float norm_in[N];
for (i = 0; i < N; i++) {
norm_in[i] = float(model1.norm_in[i]);
}
//Create device pointers for rfs1, rfs2, norm_in1, and norm_in2
RF_Predict* rfs1_d;
RF_Predict* rfs2_d;
RF_Predict* rfs3_d;
//Allocate space for state, U, goal, rfs1, rfs2, and vars in device memory
HANDLE_ERROR( cudaMalloc((void**)&state_d, STATE_DIM*sizeof(float)));
HANDLE_ERROR( cudaMalloc((void**)&goal_d, STATE_DIM*sizeof(float)));
HANDLE_ERROR( cudaMalloc((void**)&vars_d, CONTROL_DIM*sizeof(float)));
HANDLE_ERROR( cudaMalloc((void**)&rfs1_d, model1.sub[0].numRFS*sizeof(RF_Predict)));
HANDLE_ERROR( cudaMalloc((void**)&rfs2_d, model2.sub[0].numRFS*sizeof(RF_Predict)));
HANDLE_ERROR( cudaMalloc((void**)&rfs3_d, model3.sub[0].numRFS*sizeof(RF_Predict)));
//Copy state, U, goal, model1, and model2 into device memory
HANDLE_ERROR( cudaMemcpy(state_d, state, STATE_DIM*sizeof(float), cudaMemcpyHostToDevice));
HANDLE_ERROR( cudaMemcpyToSymbol(U_d, U, CONTROL_DIM*T*sizeof(float), 0, cudaMemcpyHostToDevice));
HANDLE_ERROR( cudaMemcpy(goal_d, goal, STATE_DIM*sizeof(float), cudaMemcpyHostToDevice));
HANDLE_ERROR( cudaMemcpy(vars_d, vars, CONTROL_DIM*sizeof(float), cudaMemcpyHostToDevice));
HANDLE_ERROR( cudaMemcpy(rfs1_d, rfs1, model1.sub[0].numRFS*sizeof(RF_Predict), cudaMemcpyHostToDevice));
HANDLE_ERROR( cudaMemcpy(rfs2_d, rfs2, model2.sub[0].numRFS*sizeof(RF_Predict), cudaMemcpyHostToDevice));
HANDLE_ERROR( cudaMemcpy(rfs3_d, rfs3, model3.sub[0].numRFS*sizeof(RF_Predict), cudaMemcpyHostToDevice));
HANDLE_ERROR( cudaMemcpyToSymbol(norm_in_d, norm_in, N*sizeof(float), 0, cudaMemcpyHostToDevice));
//Allocate space for the state costs and new controls
//For the raw state costs
float* aug_state_costs_d;
HANDLE_ERROR( cudaMalloc((void**)&aug_state_costs_d, T*K*M*sizeof(float)));
//For the averaged state costs
float* state_costs_d;
//For controls we just re-use du_d
HANDLE_ERROR( cudaMalloc((void**)&state_costs_d, T*K*sizeof(float)));
//Now we set the grid and block size
int xBlockSize = (BLOCKSIZE-1)/M + 1;
int yBlockSize = M;
int xGridSize = (K-1)/xBlockSize + 1;
dim3 dimBlock1(xBlockSize, yBlockSize, 1);
dim3 dimGrid1(xGridSize, 1, 1);
cudaEventRecord(start, 0);
//Now we launch the kernel to compute the new control
rollout_kernel<<<dimGrid1, dimBlock1>>>(aug_state_costs_d, state_d, goal_d, rfs1_d, rfs2_d, rfs3_d, du_d, vars_d, model1.sub[0].numRFS, model2.sub[0].numRFS, model3.sub[0].numRFS);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaDeviceSynchronize();
//Wait until the kernel has finished
dim3 dimBlock2(T, 1, 1);
dim3 dimGrid2(K, 1, 1);
//Compute expectation of the costs
expec_costs_kernel<<<dimGrid2, dimBlock2>>>(state_costs_d, aug_state_costs_d);
cudaDeviceSynchronize();
dim3 dimBlock3(64, 1, 1);
dim3 dimGrid3((K-1)/64 + 1, 1, 1);
//Now we normalize the cost-to-go by the noise free path, and exponentiate by the -lambda*cost2go
norm_exp_costs_kernel<<<dimGrid3, dimBlock3>>>(state_costs_d);
cudaDeviceSynchronize();
//Compute the normalizer
//For now just do it on the CPU
//Transfer state costs to host memory
float* state_costs;
state_costs = (float*)malloc(T*K*sizeof(float));
HANDLE_ERROR( cudaMemcpy(state_costs, state_costs_d, T*K*sizeof(float), cudaMemcpyDeviceToHost));
//Now compute the normalizer
float* normalizer;
normalizer = (float*)malloc(T*sizeof(float));
for (i = 0; i < T; i++) {
normalizer[i] = 0;
for (j = 0; j < K; j++) {
normalizer[i] += state_costs[T*j + i];
}
}
//Compute the new controls
//Just do on CPU for now
//First transfer controls to host memory
float* du;
du = (float*)malloc(T*K*CONTROL_DIM*sizeof(float));
HANDLE_ERROR( cudaMemcpy(du, du_d, T*K*CONTROL_DIM*sizeof(float), cudaMemcpyDeviceToHost));
//Now compute the new control and place it in U
float* U_new;
U_new = (float*)malloc(T*CONTROL_DIM*sizeof(float));
for (i = 0; i < T; i++) {
U_new[CONTROL_DIM*i] = (state_costs[i]/normalizer[i])*U[CONTROL_DIM*i];
U_new[CONTROL_DIM*i + 1] = (state_costs[i]/normalizer[i])*U[CONTROL_DIM*i + 1];
for (j = 0; j < K; j++) {
float u1 = U[CONTROL_DIM*i] + du[T*CONTROL_DIM*j + CONTROL_DIM*i]*vars[0];
float u2 = U[CONTROL_DIM*i + 1] + du[T*CONTROL_DIM*j + CONTROL_DIM*i + 1]*vars[1];
float u_max = .5;
float u_min = -.5;
u1 = fmin(u1, u_max);
u1 = fmax(u1, u_min);
u2 = fmin(u2, u_max);
u2 = fmax(u2, u_min);
U_new[CONTROL_DIM*i] += (state_costs[T*j + i]/normalizer[i])*u1;
U_new[CONTROL_DIM*i + 1] += (state_costs[T*j + i]/normalizer[i])*u2;
}
U[i*CONTROL_DIM] = U_new[i*CONTROL_DIM];
U[i*CONTROL_DIM + 1] = U_new[i*CONTROL_DIM + 1];
}
//Free device arrays
cudaFree(state_d);
cudaFree(goal_d);
cudaFree(rfs1_d);
cudaFree(rfs2_d);
cudaFree(rfs3_d);
cudaFree(du_d);
cudaFree(state_costs_d);
cudaFree(aug_state_costs_d);
cudaFree(vars_d);
//Free host arrays
free(rfs1);
free(rfs2);
free(rfs3);
free(state_costs);
free(du);
free(normalizer);
//Print timing results
cudaEventElapsedTime(&time, start, stop);
printf("Kernel Time: %f ms \n", time);
}
void dynamics(float* s, float* u, float dt) {
s[0] += dt*(s[3] + 1.1*s[4])/2.0*cos(s[2]);
s[1] += dt*(s[3] + 1.1*s[4])/2.0*sin(s[2]);
s[2] += dt*(s[3] - 1.1*s[4])/.258;
s[3] += dt*((u[0] + u[1]) - s[3]);
s[4] += dt*((u[0] - u[1]) - s[4]);
if (s[0] > 10.0) {
s[0] = 10.0;
}
else if (s[0] < -10.0) {
s[0] = -10.0;
}
if (s[1] > 10.0) {
s[1] = 10.0;
}
else if (s[1] < -10.0) {
s[1] = -10.0;
}
if (s[2] > 3.14) {
s[2] = -3.14;
}
else if (s[2] < -3.14) {
s[2] = 3.14;
}
if (s[3] > .5) {
s[3] = .5;
}
else if (s[3] < -.5) {
s[3] = -.5;
}
if (s[4] > .5) {
s[4] = .5;
}
else if (s[4] < -.5) {
s[4] = -.5;
}
}
int main() {
LWPR_Model model1;
LWPR_Model model2;
LWPR_Model model3;
char x_dot[] = {'t', 'r', 'a', 'j', '_', 'x', '.', 'x', 'm', 'l', '\0'};
char y_dot[] = {'t', 'r', 'a', 'j', '_', 'y', '.', 'x', 'm', 'l', '\0'};
char theta_dot[] = {'t', 'h', 'e', 't', 'a', '.', 'x', 'm', 'l', '\0'};
int e1[] = {-3};
int e2[] = {-3};
int e3[] = {-3};
lwpr_init_model(&model1, 5, 1, "x");
lwpr_init_model(&model2, 5, 1, "y");
lwpr_init_model(&model3, 5, 1, "theta");
//lwpr_read_xml(&model1, x_dot, e1);
//lwpr_read_xml(&model2, y_dot, e2);
//lwpr_read_xml(&model3, theta_dot, e3);
printf("%d %d %d", e1[0], e2[0], e3[0]);
float U[T*CONTROL_DIM] = {0};
float u[CONTROL_DIM] = {0};
float s[STATE_DIM] = {0};
float goal[] = {2.0, 2.0, 0, 0, 0};
float vars[] = {.50, .25};
curandGenerator_t gen;
float dt = (1.0)/(1.0*HZ);
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
int i,j;
for (j = 0; j < 2500; j++) {
compute_control(s, U, goal, model1, model2, model3, vars, gen);
u[0] = U[0];
u[1] = U[1];
for (i = 0; i < (T-1)*CONTROL_DIM; i++) {
U[i] = U[i+CONTROL_DIM];
}
//Zero out the last control pair after shifting the horizon
U[(T-1)*CONTROL_DIM] = 0;
U[(T-1)*CONTROL_DIM + 1] = 0;
double lwpr_input[5] = {s[0], s[1], s[2], s[3], s[4]};
double out1 = s[0];
double out2 = s[1];
double out3 = s[2];
dynamics(s, u, dt);
out1 = (s[0] - out1)/dt;
out2 = (s[1] - out2)/dt;
out3 = (s[2] - out3)/dt;
lwpr_update(&model1, lwpr_input, &out1, NULL, NULL);
lwpr_update(&model2, lwpr_input, &out2, NULL, NULL);
lwpr_update(&model3, lwpr_input, &out3, NULL, NULL);
printf("Current Location: (%f, %f, %f, %f, %f,) \n", s[0], s[1], s[2], s[3], s[4]);
}
//Save the LWPR models
char xn_dot[] = {'x', 'n', '.', 'x', 'm', 'l', '\0'};
char yn_dot[] = {'y', 'n', '.', 'x', 'm', 'l', '\0'};
char thetan_dot[] = {'t', 'h', 'e', 't', 'a', 'n', '.', 'x', 'm', 'l', '\0'};
lwpr_write_xml(&model1, xn_dot);
lwpr_write_xml(&model2, yn_dot);
lwpr_write_xml(&model3, thetan_dot);
}
|
0812408491ee34947a85de425f6a78644b03533a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "precomp.cuh"
#include "bitboard.h"
#include "gpuminimax.h"
namespace Checkers
{
namespace GPUMinimax
{
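// Sequentially expands the move frontier of a single board with alpha-beta
// pruning: the move generator is chosen depending on whether jump moves are
// available, and the recursion bottoms out when GetWhiteUtility reports a
// terminal value for the remaining depth/turn budget.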
__device__ utility_type explore_white_frontier(GPUBitBoard board, utility_type alpha, utility_type beta, NodeType node_type, int depth, int turns)
{
GPUBitBoard frontier[32];
int frontier_size = 0;
int v = (node_type == NodeType::MAX) ? -Infinity : Infinity;
int gen_board_type;
utility_type terminal_value = 0;
if (GetWhiteUtility(board, terminal_value, depth, turns))
{
return terminal_value;
}
if (node_type == NodeType::MAX)
{
gen_board_type = (GPUBitBoard::GetWhiteJumps(board) != 0) ? 1 : 0;
}
else
{
gen_board_type = (GPUBitBoard::GetBlackJumps(board) != 0) ? 1 : 0;
}
if (node_type == NodeType::MAX)
{
// if dynamic parallelism is possible, can call another kernel here
for (int i = 0; i < 32; ++i)
{
gen_white_move[gen_board_type](1u << i, board, frontier, frontier_size);
}
for (int j = 0; j < frontier_size; ++j)
{
v = GET_MAX(explore_white_frontier(frontier[j], alpha, beta, node_type + 1, depth - 1, turns - 1), v);
if (v > beta)
{
break;
}
alpha = GET_MAX(alpha, v);
}
}
else
{
// if dynamic parallelism is possible, can call another kernel here
for (int i = 0; i < 32; ++i)
{
gen_black_move[gen_board_type](1u << i, board, frontier, frontier_size);
}
for (int j = 0; j < frontier_size; ++j)
{
v = GET_MIN(explore_white_frontier(frontier[j], alpha, beta, node_type + 1, depth - 1, turns - 1), v);
if (v < alpha)
{
break;
}
beta = GET_MIN(beta, v);
}
}
return v;
}
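// One block per candidate board: its 32 threads cooperatively generate the
// board's move frontier in shared memory, each thread scores one frontier
// entry with explore_white_frontier, and an in-warp max/min reduction
// collapses the scores into v[bx]. Block 0 then reduces across all boards
// so that v[0] ends up holding the best value for the current node type.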
__global__ void white_kernel(utility_type *v, GPUBitBoard const *boards, int num_boards, utility_type alpha, utility_type beta, NodeType node_type, int depth, int turns)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
__shared__ int frontier_size;
__shared__ int gen_board_type;
__shared__ GPUBitBoard frontier[32];
__shared__ utility_type t_v[32];
__shared__ bool terminated;
if (tx == 0)
{
frontier_size = 0;
utility_type terminal_value = 0;
if (terminated = GetWhiteUtility(boards[bx], terminal_value, depth, turns))
{
v[bx] = terminal_value;
}
else
{
if ((node_type + 1) == NodeType::MAX)
{
gen_board_type = (GPUBitBoard::GetWhiteJumps(boards[bx]) != 0) ? 1 : 0;
}
else
{
gen_board_type = (GPUBitBoard::GetBlackJumps(boards[bx]) != 0) ? 1 : 0;
}
}
}
__syncthreads();
if (!terminated)
{
if ((node_type + 1) == NodeType::MAX)
{
gen_white_move_atomic[gen_board_type](1u << tx, boards[bx], frontier, &frontier_size);
}
else
{
gen_black_move_atomic[gen_board_type](1u << tx, boards[bx], frontier, &frontier_size);
}
__syncthreads();
if (tx < frontier_size)
{
t_v[tx] = explore_white_frontier(frontier[tx], alpha, beta, node_type + 2, depth - 1, turns - 1);
}
else
{
t_v[tx] = node_type == NodeType::MAX ? Infinity : -Infinity;
}
__syncthreads();
if ((node_type + 1) == NodeType::MAX)
{
for (int i = 1; i < 32; i *= 2)
{
if (tx + i < 32)
{
t_v[tx] = GET_MAX(t_v[tx], t_v[tx + i]);
}
}
}
else
{
for (int i = 1; i < 32; i *= 2)
{
if (tx + i < 32)
{
t_v[tx] = GET_MIN(t_v[tx], t_v[tx + i]);
}
}
}
__syncthreads();
if (tx == 0)
{
v[bx] = t_v[tx];
}
}
__syncthreads();
if (bx == 0)
{
if (tx < num_boards)
{
t_v[tx] = v[tx];
}
else
{
t_v[tx] = node_type == NodeType::MAX ? -Infinity : Infinity;
}
__syncthreads();
if ((node_type) == NodeType::MAX)
{
for (int i = 1; i < 32; i *= 2)
{
if (tx + i < 32)
{
t_v[tx] = GET_MAX(t_v[tx], t_v[tx + i]);
}
}
}
else
{
for (int i = 1; i < 32; i *= 2)
{
if (tx + i < 32)
{
t_v[tx] = GET_MIN(t_v[tx], t_v[tx + i]);
}
}
}
__syncthreads();
if (tx < num_boards)
{
v[tx] = t_v[tx];
}
}
__syncthreads();
}
}
} | 0812408491ee34947a85de425f6a78644b03533a.cu | #include "precomp.cuh"
#include "bitboard.h"
#include "gpuminimax.h"
namespace Checkers
{
namespace GPUMinimax
{
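// Sequentially expands the move frontier of a single board with alpha-beta
// pruning: the move generator is chosen depending on whether jump moves are
// available, and the recursion bottoms out when GetWhiteUtility reports a
// terminal value for the remaining depth/turn budget.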
__device__ utility_type explore_white_frontier(GPUBitBoard board, utility_type alpha, utility_type beta, NodeType node_type, int depth, int turns)
{
GPUBitBoard frontier[32];
int frontier_size = 0;
int v = (node_type == NodeType::MAX) ? -Infinity : Infinity;
int gen_board_type;
utility_type terminal_value = 0;
if (GetWhiteUtility(board, terminal_value, depth, turns))
{
return terminal_value;
}
if (node_type == NodeType::MAX)
{
gen_board_type = (GPUBitBoard::GetWhiteJumps(board) != 0) ? 1 : 0;
}
else
{
gen_board_type = (GPUBitBoard::GetBlackJumps(board) != 0) ? 1 : 0;
}
if (node_type == NodeType::MAX)
{
// if dynamic parallelism is possible, can call another kernel here
for (int i = 0; i < 32; ++i)
{
gen_white_move[gen_board_type](1u << i, board, frontier, frontier_size);
}
for (int j = 0; j < frontier_size; ++j)
{
v = GET_MAX(explore_white_frontier(frontier[j], alpha, beta, node_type + 1, depth - 1, turns - 1), v);
if (v > beta)
{
break;
}
alpha = GET_MAX(alpha, v);
}
}
else
{
// if dynamic parallelism is possible, can call another kernel here
for (int i = 0; i < 32; ++i)
{
gen_black_move[gen_board_type](1u << i, board, frontier, frontier_size);
}
for (int j = 0; j < frontier_size; ++j)
{
v = GET_MIN(explore_white_frontier(frontier[j], alpha, beta, node_type + 1, depth - 1, turns - 1), v);
if (v < alpha)
{
break;
}
beta = GET_MIN(beta, v);
}
}
return v;
}
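// One block per candidate board: its 32 threads cooperatively generate the
// board's move frontier in shared memory, each thread scores one frontier
// entry with explore_white_frontier, and an in-warp max/min reduction
// collapses the scores into v[bx]. Block 0 then reduces across all boards
// so that v[0] ends up holding the best value for the current node type.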
__global__ void white_kernel(utility_type *v, GPUBitBoard const *boards, int num_boards, utility_type alpha, utility_type beta, NodeType node_type, int depth, int turns)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
__shared__ int frontier_size;
__shared__ int gen_board_type;
__shared__ GPUBitBoard frontier[32];
__shared__ utility_type t_v[32];
__shared__ bool terminated;
if (tx == 0)
{
frontier_size = 0;
utility_type terminal_value = 0;
if (terminated = GetWhiteUtility(boards[bx], terminal_value, depth, turns))
{
v[bx] = terminal_value;
}
else
{
if ((node_type + 1) == NodeType::MAX)
{
gen_board_type = (GPUBitBoard::GetWhiteJumps(boards[bx]) != 0) ? 1 : 0;
}
else
{
gen_board_type = (GPUBitBoard::GetBlackJumps(boards[bx]) != 0) ? 1 : 0;
}
}
}
__syncthreads();
if (!terminated)
{
if ((node_type + 1) == NodeType::MAX)
{
gen_white_move_atomic[gen_board_type](1u << tx, boards[bx], frontier, &frontier_size);
}
else
{
gen_black_move_atomic[gen_board_type](1u << tx, boards[bx], frontier, &frontier_size);
}
__syncthreads();
if (tx < frontier_size)
{
t_v[tx] = explore_white_frontier(frontier[tx], alpha, beta, node_type + 2, depth - 1, turns - 1);
}
else
{
t_v[tx] = node_type == NodeType::MAX ? Infinity : -Infinity;
}
__syncthreads();
if ((node_type + 1) == NodeType::MAX)
{
for (int i = 1; i < 32; i *= 2)
{
if (tx + i < 32)
{
t_v[tx] = GET_MAX(t_v[tx], t_v[tx + i]);
}
}
}
else
{
for (int i = 1; i < 32; i *= 2)
{
if (tx + i < 32)
{
t_v[tx] = GET_MIN(t_v[tx], t_v[tx + i]);
}
}
}
__syncthreads();
if (tx == 0)
{
v[bx] = t_v[tx];
}
}
__syncthreads();
if (bx == 0)
{
if (tx < num_boards)
{
t_v[tx] = v[tx];
}
else
{
t_v[tx] = node_type == NodeType::MAX ? -Infinity : Infinity;
}
__syncthreads();
if ((node_type) == NodeType::MAX)
{
for (int i = 1; i < 32; i *= 2)
{
if (tx + i < 32)
{
t_v[tx] = GET_MAX(t_v[tx], t_v[tx + i]);
}
}
}
else
{
for (int i = 1; i < 32; i *= 2)
{
if (tx + i < 32)
{
t_v[tx] = GET_MIN(t_v[tx], t_v[tx + i]);
}
}
}
__syncthreads();
if (tx < num_boards)
{
v[tx] = t_v[tx];
}
}
__syncthreads();
}
}
} |
ffdca4c7565efb21c456864f5237fb03a5fd3f3c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 6
//Poisson Blending
/* Background
==========
The goal for this assignment is to take one image (the source) and
paste it into another image (the destination) attempting to match the
two images so that the pasting is non-obvious. This is
known as a "seamless clone".
The basic ideas are as follows:
1) Figure out the interior and border of the source image
2) Use the values of the border pixels in the destination image
as boundary conditions for solving a Poisson equation that tells
us how to blend the images.
No pixels from the destination except pixels on the border
are used to compute the match.
Solving the Poisson Equation
============================
There are multiple ways to solve this equation - we choose an iterative
method - specifically the Jacobi method. Iterative methods start with
a guess of the solution and then iterate to try and improve the guess
until it stops changing. If the problem was well-suited for the method
then it will stop and where it stops will be the solution.
The Jacobi method is the simplest iterative method and converges slowly -
that is we need a lot of iterations to get to the answer, but it is the
easiest method to write.
Jacobi Iterations
=================
Our initial guess is going to be the source image itself. This is a pretty
good guess for what the blended image will look like and it means that
we won't have to do as many iterations compared to if we had started far
from the final solution.
ImageGuess_prev (Floating point)
ImageGuess_next (Floating point)
DestinationImg
SourceImg
Follow these steps to implement one iteration:
1) For every pixel p in the interior, compute two sums over the four neighboring pixels:
Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor]
else if the neighbor in on the border then += DestinationImg[neighbor]
Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors)
2) Calculate the new pixel value:
float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT
ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255]
In this assignment we will do 800 iterations.
*/
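/* Illustrative sketch only (not part of the original assignment code): one
Jacobi iteration over a single colour channel, following steps 1) and 2)
above. The flat row-major indexing, the separate float channel buffers and
the d_interior mask used here are assumptions made purely for illustration. */
__global__
void jacobi_iteration_sketch(const float* const d_guess_prev,
float* const d_guess_next,
const unsigned char* const d_interior,
const float* const d_sourceChannel,
const float* const d_destChannel,
const size_t numRows,
const size_t numCols)
{
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int r = blockIdx.y * blockDim.y + threadIdx.y;
// Skip the outer image frame so all four neighbour indices stay in bounds
if (c < 1 || r < 1 || c >= (int)numCols - 1 || r >= (int)numRows - 1)
{
return;
}
const int p = r * (int)numCols + c;
if (!d_interior[p])
{
return;
}
const int neighbors[4] = { p - (int)numCols, p + (int)numCols, p - 1, p + 1 };
float sum1 = 0.f; // boundary / previous-guess term
float sum2 = 0.f; // source gradient term
for (int i = 0; i < 4; ++i)
{
const int n = neighbors[i];
// Interior neighbours contribute the previous guess, border neighbours
// contribute the destination image (the boundary condition).
sum1 += d_interior[n] ? d_guess_prev[n] : d_destChannel[n];
sum2 += d_sourceChannel[p] - d_sourceChannel[n];
}
const float newVal = (sum1 + sum2) / 4.f;
d_guess_next[p] = fminf(255.f, fmaxf(0.f, newVal)); // clamp to [0, 255]
}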
#include "utils.h"
#include <thrust/host_vector.h>
#include "reference_calc_custom.h"
#define BLOCK_SIZE_CALC_MASK_MAX_X 22
#define BLOCK_SIZE_CALC_MASK_MAX_Y 22
#define ENABLE_DEBUG
#if defined(ENABLE_DEBUG)
#define DEBUG_COLLECT_SHARED_DATA_BLOCK_X 0
#define DEBUG_COLLECT_SHARED_DATA_BLOCK_Y 0
#endif
// #define ENABLE_STRICT_ERROR_CHECKING
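// Computes the blend mask from the source image: a pixel belongs to the mask
// if it is not pure white, and every mask pixel is further classified as
// interior (all four neighbours inside the mask) or border. When
// MASK_KERNEL_USE_SHARED is defined, the tile plus a one-pixel halo is staged
// in shared memory first.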
__global__
void calculateMaskKernel(
const uchar4 * const d_sourceImg,
const size_t numRowsSource,
const size_t numColsSource,
unsigned char* d_mask,
unsigned char* d_border,
unsigned char* d_interior
#if defined (ENABLE_DEBUG)
, uchar4 * d_shared_for_debug
#endif
)
{
#define MASK_KERNEL_USE_SHARED
#if defined (MASK_KERNEL_USE_SHARED)
__shared__ uchar4 _shared[BLOCK_SIZE_CALC_MASK_MAX_X + 2][BLOCK_SIZE_CALC_MASK_MAX_Y + 2];
#endif
const int2 threadPos2D =
make_int2(
blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if ( threadPos2D.x >= numColsSource ||
threadPos2D.y >= numRowsSource )
{
return;
}
else
{
const int myId =
threadPos2D.y * numColsSource +
threadPos2D.x;
#if defined (MASK_KERNEL_USE_SHARED)
int myIdTop =
(threadPos2D.y - 1) * numColsSource +
threadPos2D.x;
int myIdBottom =
(threadPos2D.y + 1) * numColsSource +
threadPos2D.x;
int myIdLeft =
threadPos2D.y * numColsSource +
(threadPos2D.x - 1);
int myIdRight =
threadPos2D.y * numColsSource +
(threadPos2D.x + 1);
// Top left thread fetches top left neighbor (if available)
if ((threadIdx.x == 0) &&
(threadIdx.y == 0))
{
if ((threadPos2D.x > 0) &&
(threadPos2D.y > 0))
{
_shared[threadIdx.x][threadIdx.y] =
d_sourceImg[myIdTop - 1];
}
else
{
_shared[threadIdx.x][threadIdx.y] =
make_uchar4(
255,
255,
255,
255);
}
}
__syncthreads();
// Top row fetches all top neighbors
if (threadIdx.y == 0)
{
if (threadPos2D.y > 0)
{
_shared[threadIdx.x + 1][threadIdx.y] =
d_sourceImg[myIdTop];
}
else
{
_shared[threadIdx.x + 1][threadIdx.y] =
make_uchar4(
255,
255,
255,
255);
}
}
__syncthreads();
// Top right thread fetches top right neighbor (if available)
if (((threadIdx.x == (blockDim.x - 1)) ||
(threadPos2D.x == (numColsSource - 1))) &&
(threadIdx.y == 0))
{
if ((threadPos2D.x < (numColsSource - 1)) &&
(threadPos2D.y > 0))
{
_shared[threadIdx.x + 2][threadIdx.y] =
d_sourceImg[myIdTop + 1];
}
else
{
_shared[threadIdx.x + 2][threadIdx.y] =
make_uchar4(
255,
255,
255,
255);
}
}
__syncthreads();
// Left column fetches all left neighbors
if ((threadIdx.x == 0) &&
(threadPos2D.y < numRowsSource))
{
if (threadPos2D.x > 0)
{
_shared[threadIdx.x][threadIdx.y + 1] =
d_sourceImg[myIdLeft];
}
else
{
_shared[threadIdx.x][threadIdx.y + 1] =
make_uchar4(
255,
255,
255,
255);
}
}
__syncthreads();
// Fetch all standard elements of the tile
_shared[threadIdx.x + 1][threadIdx.y + 1] =
d_sourceImg[myId];
__syncthreads();
// Right column fetches all right neighbors
if (((threadIdx.x == (blockDim.x - 1)) ||
(threadPos2D.x == (numColsSource - 1))) &&
(threadPos2D.y < numRowsSource))
{
if (threadPos2D.x < (numColsSource - 1))
{
_shared[threadIdx.x + 2][threadIdx.y + 1] =
d_sourceImg[myIdRight];
}
else
{
_shared[threadIdx.x + 2][threadIdx.y + 1] =
make_uchar4(
255,
255,
255,
255);
}
}
__syncthreads();
// Bottom left thread fetches bottom left neighbor (if available)
if ((threadIdx.x == 0) &&
((threadIdx.y == (blockDim.y - 1)) ||
(threadPos2D.y == (numRowsSource - 1))))
{
if ((threadPos2D.x > 0) &&
(threadPos2D.y < (numRowsSource - 1)))
{
_shared[threadIdx.x][threadIdx.y + 2] =
d_sourceImg[myIdBottom - 1];
}
else
{
_shared[threadIdx.x][threadIdx.y + 2] =
make_uchar4(
255,
255,
255,
255);
}
}
__syncthreads();
// Bottom row fetches all bottom neighbors
if ((threadIdx.y == (blockDim.y - 1)) ||
(threadPos2D.y == (numRowsSource - 1)))
{
if (threadPos2D.y < (numRowsSource - 1))
{
_shared[threadIdx.x + 1][threadIdx.y + 2] =
d_sourceImg[myIdBottom];
}
else
{
_shared[threadIdx.x + 1][threadIdx.y + 2] =
make_uchar4(
255,
255,
255,
255);
}
}
__syncthreads();
// Bottom right thread fetches bottom right neighbor (if available)
if (((threadIdx.x == (blockDim.x - 1)) ||
(threadPos2D.x == (numColsSource - 1))) &&
((threadIdx.y == (blockDim.y - 1)) ||
(threadPos2D.y == (numRowsSource - 1))))
{
if ((threadPos2D.x < (numColsSource - 1)) &&
(threadPos2D.y < (numRowsSource - 1)))
{
_shared[threadIdx.x + 2][threadIdx.y + 2] =
d_sourceImg[myIdBottom + 1];
}
else
{
_shared[threadIdx.x + 2][threadIdx.y + 2] =
make_uchar4(
255,
255,
255,
255);
}
}
__syncthreads();
// Determine the outside pixels (set them to 0)
unsigned char _maskVal =
((_shared[threadIdx.x + 1][threadIdx.y + 1].x +
_shared[threadIdx.x + 1][threadIdx.y + 1].y +
_shared[threadIdx.x + 1][threadIdx.y + 1].z) < 3 * 255) ?
1 :
0;
d_mask[myId] = _maskVal;
__syncthreads();
if (_maskVal)
{
uchar4 _topNeighbor =
_shared[threadIdx.x + 1][threadIdx.y];
uchar4 _bottomNeighbor =
_shared[threadIdx.x + 1][threadIdx.y + 2];
uchar4 _leftNeighbor =
_shared[threadIdx.x][threadIdx.y + 1];
uchar4 _rightNeighbor =
_shared[threadIdx.x + 2][threadIdx.y + 1];
bool _topNeighborIn =
(_topNeighbor.x +
_topNeighbor.y +
_topNeighbor.z) < 3 * 255;
bool _bottomNeighborIn =
(_bottomNeighbor.x +
_bottomNeighbor.y +
_bottomNeighbor.z) < 3 * 255;
bool _leftNeighborIn =
(_leftNeighbor.x +
_leftNeighbor.y +
_leftNeighbor.z) < 3 * 255;
bool _rightNeighborIn =
(_rightNeighbor.x +
_rightNeighbor.y +
_rightNeighbor.z) < 3 * 255;
if (_topNeighborIn &&
_bottomNeighborIn &&
_leftNeighborIn &&
_rightNeighborIn)
{
d_border[myId] = 0;
d_interior[myId] = 1;
}
else
{
d_border[myId] = 1;
d_interior[myId] = 0;
}
}
else
{
d_border[myId] = 0;
d_interior[myId] = 0;
}
#if defined (ENABLE_DEBUG)
if ((blockIdx.x == DEBUG_COLLECT_SHARED_DATA_BLOCK_X) &&
(blockIdx.y == DEBUG_COLLECT_SHARED_DATA_BLOCK_Y))
{
int _sharedId =
((threadIdx.y + 1) * (BLOCK_SIZE_CALC_MASK_MAX_X + 2)) +
(threadIdx.x + 1);
d_shared_for_debug[_sharedId] =
_shared[threadIdx.x + 1][threadIdx.y + 1];
// Top left
if ((threadIdx.x == 0) &&
(threadIdx.y == 0))
{
_sharedId =
0;
d_shared_for_debug[_sharedId] =
_shared[threadIdx.x][threadIdx.y];
}
// Top row
if (threadIdx.y == 0)
{
_sharedId =
threadIdx.x + 1;
d_shared_for_debug[_sharedId] =
_shared[threadIdx.x + 1][threadIdx.y];
}
// Top right
if (((threadIdx.x == (blockDim.x - 1)) ||
(threadPos2D.x == (numColsSource - 1))) &&
(threadIdx.y == 0))
{
_sharedId =
threadIdx.x + 2;
d_shared_for_debug[_sharedId] =
_shared[threadIdx.x + 2][threadIdx.y];
}
// Left column
if ((threadIdx.x == 0) &&
(threadPos2D.y < numRowsSource))
{
_sharedId =
(threadIdx.y + 1) * (BLOCK_SIZE_CALC_MASK_MAX_X + 2);
d_shared_for_debug[_sharedId] =
_shared[threadIdx.x][threadIdx.y + 1];
}
// Right column
if (((threadIdx.x == (blockDim.x - 1)) ||
(threadPos2D.x == (numColsSource - 1))) &&
(threadPos2D.y < numRowsSource ))
{
_sharedId =
(threadIdx.y + 1) * (BLOCK_SIZE_CALC_MASK_MAX_X + 2) + threadIdx.x + 2;
d_shared_for_debug[_sharedId] =
_shared[threadIdx.x + 2][threadIdx.y + 1];
}
// Bottom left
if ((threadIdx.x == 0) &&
((threadIdx.y == (blockDim.y - 1)) ||
(threadPos2D.y == (numRowsSource - 1))))
{
_sharedId =
(threadIdx.y + 2) * (BLOCK_SIZE_CALC_MASK_MAX_X + 2);
d_shared_for_debug[_sharedId] =
_shared[threadIdx.x][threadIdx.y + 2];
}
// Bottom row
if ((threadIdx.y == (blockDim.y - 1)) ||
(threadPos2D.y == (numRowsSource - 1)))
{
_sharedId =
(threadIdx.y + 2) * (BLOCK_SIZE_CALC_MASK_MAX_X + 2) + threadIdx.x + 1;
d_shared_for_debug[_sharedId] =
_shared[threadIdx.x + 1][threadIdx.y + 2];
}
// Bottom right
if (((threadIdx.x == (blockDim.x - 1)) ||
(threadPos2D.x == (numColsSource - 1))) &&
((threadIdx.y == (blockDim.y - 1)) ||
(threadPos2D.y == (numRowsSource - 1))))
{
_sharedId =
(threadIdx.y + 2) * (BLOCK_SIZE_CALC_MASK_MAX_X + 2) + threadIdx.x + 2;
d_shared_for_debug[_sharedId] =
_shared[threadIdx.x + 2][threadIdx.y + 2];
}
}
#endif
#else
uchar4 _local =
d_sourceImg[myId];
d_mask[myId] =
((_local.x +
_local.y +
_local.z) < 3 * 255) ?
1 :
0;
#endif
}
}
void your_blend(const uchar4* const h_sourceImg, //IN
const size_t numRowsSource, const size_t numColsSource,
const uchar4* const h_destImg, //IN
uchar4* const h_blendedImg) //OUT
{
/* To Recap here are the steps you need to implement
1) Compute a mask of the pixels from the source image to be copied
     The pixels that shouldn't be copied are completely white: they
have R=255, G=255, B=255. Any other pixels SHOULD be copied.
*/
size_t srcSize = numRowsSource * numColsSource;
#if defined (ENABLE_DEBUG)
unsigned char* h_mask_dbg =
new unsigned char[srcSize];
memset(
h_mask_dbg,
0x0,
srcSize * sizeof(unsigned char));
unsigned char* h_border_dbg =
new unsigned char[srcSize];
memset(
h_border_dbg,
0x0,
srcSize * sizeof(unsigned char));
unsigned char* h_interior_dbg =
new unsigned char[srcSize];
memset(
h_interior_dbg,
0x0,
srcSize * sizeof(unsigned char));
uchar4 *h_shared_for_debug =
new uchar4[(BLOCK_SIZE_CALC_MASK_MAX_X + 2) * (BLOCK_SIZE_CALC_MASK_MAX_Y + 2)];
memset(
h_shared_for_debug,
0x0,
(BLOCK_SIZE_CALC_MASK_MAX_X + 2) * (BLOCK_SIZE_CALC_MASK_MAX_Y + 2) * sizeof(uchar4));
uchar4 *d_shared_for_debug;
        // Allocate memory on the device for retrieving shared data
checkCudaErrors(
hipMalloc(
&d_shared_for_debug,
(BLOCK_SIZE_CALC_MASK_MAX_X + 2) * (BLOCK_SIZE_CALC_MASK_MAX_Y + 2) * sizeof(uchar4)));
#endif
unsigned char* d_mask;
// Allocate memory on the device for storing the mask data
checkCudaErrors(
hipMalloc(
&d_mask,
srcSize * sizeof(unsigned char)));
unsigned char* d_border;
// Allocate memory on the device for storing the border data
checkCudaErrors(
hipMalloc(
&d_border,
srcSize * sizeof(unsigned char)));
unsigned char* d_interior;
// Allocate memory on the device for storing the interior data
checkCudaErrors(
hipMalloc(
&d_interior,
srcSize * sizeof(unsigned char)));
uchar4* d_sourceImg;
// Allocate memory on the device for storing the source image data
checkCudaErrors(
hipMalloc(
&d_sourceImg,
srcSize * sizeof(uchar4)));
// Copy source image data to device
checkCudaErrors(
hipMemcpy(
d_sourceImg,
h_sourceImg,
srcSize * sizeof(uchar4),
hipMemcpyHostToDevice));
int gridSizeX =
(numColsSource - 1) / BLOCK_SIZE_CALC_MASK_MAX_X + 1;
int gridSizeY =
(numRowsSource - 1) / BLOCK_SIZE_CALC_MASK_MAX_Y + 1;
// Set block size (i.e., number of threads per block)
const dim3 blockSize(
BLOCK_SIZE_CALC_MASK_MAX_X,
BLOCK_SIZE_CALC_MASK_MAX_Y,
1);
// Set grid size (i.e., number of blocks per kernel launch)
const dim3 gridSize(
gridSizeX,
gridSizeY,
1);
hipLaunchKernelGGL(( calculateMaskKernel), dim3(gridSize), dim3(blockSize), 0, 0,
d_sourceImg,
numRowsSource,
numColsSource,
d_mask,
d_border,
d_interior
#if defined(ENABLE_DEBUG)
, d_shared_for_debug
#endif
);
#if defined (ENABLE_STRICT_ERROR_CHECKING)
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
#endif
#if defined (ENABLE_DEBUG)
// Copy mask data to host (debug)
checkCudaErrors(
hipMemcpy(
h_mask_dbg,
d_mask,
srcSize * sizeof(unsigned char),
hipMemcpyDeviceToHost));
// Copy border data to host (debug)
checkCudaErrors(
hipMemcpy(
h_border_dbg,
d_border,
srcSize * sizeof(unsigned char),
hipMemcpyDeviceToHost));
// Copy interior data to host (debug)
checkCudaErrors(
hipMemcpy(
h_interior_dbg,
d_interior,
srcSize * sizeof(unsigned char),
hipMemcpyDeviceToHost));
// Copy shared data to host (debug)
checkCudaErrors(
hipMemcpy(
h_shared_for_debug,
d_shared_for_debug,
(BLOCK_SIZE_CALC_MASK_MAX_X + 2) * (BLOCK_SIZE_CALC_MASK_MAX_Y + 2) * sizeof(uchar4),
hipMemcpyDeviceToHost));
#endif
hipFree(
d_mask);
hipFree(
d_border);
hipFree(
d_interior);
hipFree(
d_sourceImg);
/*
2) Compute the interior and border regions of the mask. An interior
pixel has all 4 neighbors also inside the mask. A border pixel is
in the mask itself, but has at least one neighbor that isn't.
3) Separate out the incoming image into three separate channels
4) Create two float(!) buffers for each color channel that will
act as our guesses. Initialize them to the respective color
     channel of the source image since that will act as our initial guess.
5) For each color channel perform the Jacobi iteration described
above 800 times.
6) Create the output image by replacing all the interior pixels
in the destination image with the result of the Jacobi iterations.
Just cast the floating point values to unsigned chars since we have
already made sure to clamp them to the correct range.
     Since this is the final assignment we provide little boilerplate code to
help you. Notice that all the input/output pointers are HOST pointers.
You will have to allocate all of your own GPU memory and perform your own
memcopies to get data in and out of the GPU memory.
Remember to wrap all of your calls with checkCudaErrors() to catch any
thing that might go wrong. After each kernel call do:
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
to catch any errors that happened while executing the kernel.
*/
#if defined (ENABLE_DEBUG)
reference_calc_custom(h_sourceImg, numRowsSource, numColsSource,
h_destImg, h_blendedImg, h_mask_dbg, h_border_dbg, h_interior_dbg);
delete []h_mask_dbg;
delete []h_border_dbg;
delete []h_interior_dbg;
hipFree(
d_shared_for_debug);
delete []h_shared_for_debug;
#endif
}
| ffdca4c7565efb21c456864f5237fb03a5fd3f3c.cu | //Udacity HW 6
//Poisson Blending
/* Background
==========
The goal for this assignment is to take one image (the source) and
paste it into another image (the destination) attempting to match the
two images so that the pasting is non-obvious. This is
known as a "seamless clone".
The basic ideas are as follows:
1) Figure out the interior and border of the source image
2) Use the values of the border pixels in the destination image
as boundary conditions for solving a Poisson equation that tells
us how to blend the images.
No pixels from the destination except pixels on the border
are used to compute the match.
Solving the Poisson Equation
============================
There are multiple ways to solve this equation - we choose an iterative
method - specifically the Jacobi method. Iterative methods start with
a guess of the solution and then iterate to try and improve the guess
until it stops changing. If the problem was well-suited for the method
then it will stop and where it stops will be the solution.
The Jacobi method is the simplest iterative method and converges slowly -
that is we need a lot of iterations to get to the answer, but it is the
easiest method to write.
Jacobi Iterations
=================
Our initial guess is going to be the source image itself. This is a pretty
good guess for what the blended image will look like and it means that
we won't have to do as many iterations compared to if we had started far
from the final solution.
ImageGuess_prev (Floating point)
ImageGuess_next (Floating point)
DestinationImg
SourceImg
Follow these steps to implement one iteration:
1) For every pixel p in the interior, compute two sums over the four neighboring pixels:
Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor]
else if the neighbor in on the border then += DestinationImg[neighbor]
Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors)
2) Calculate the new pixel value:
float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT
ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255]
In this assignment we will do 800 iterations.
*/
#include "utils.h"
#include <thrust/host_vector.h>
#include "reference_calc_custom.h"
#define BLOCK_SIZE_CALC_MASK_MAX_X 22
#define BLOCK_SIZE_CALC_MASK_MAX_Y 22
#define ENABLE_DEBUG
#if defined(ENABLE_DEBUG)
#define DEBUG_COLLECT_SHARED_DATA_BLOCK_X 0
#define DEBUG_COLLECT_SHARED_DATA_BLOCK_Y 0
#endif
// #define ENABLE_STRICT_ERROR_CHECKING
__global__
void calculateMaskKernel(
const uchar4 * const d_sourceImg,
const size_t numRowsSource,
const size_t numColsSource,
unsigned char* d_mask,
unsigned char* d_border,
unsigned char* d_interior
#if defined (ENABLE_DEBUG)
, uchar4 * d_shared_for_debug
#endif
)
{
#define MASK_KERNEL_USE_SHARED
#if defined (MASK_KERNEL_USE_SHARED)
__shared__ uchar4 _shared[BLOCK_SIZE_CALC_MASK_MAX_X + 2][BLOCK_SIZE_CALC_MASK_MAX_Y + 2];
#endif
const int2 threadPos2D =
make_int2(
blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if ( threadPos2D.x >= numColsSource ||
threadPos2D.y >= numRowsSource )
{
return;
}
else
{
const int myId =
threadPos2D.y * numColsSource +
threadPos2D.x;
#if defined (MASK_KERNEL_USE_SHARED)
int myIdTop =
(threadPos2D.y - 1) * numColsSource +
threadPos2D.x;
int myIdBottom =
(threadPos2D.y + 1) * numColsSource +
threadPos2D.x;
int myIdLeft =
threadPos2D.y * numColsSource +
(threadPos2D.x - 1);
int myIdRight =
threadPos2D.y * numColsSource +
(threadPos2D.x + 1);
// Top left thread fetches top left neighbor (if available)
if ((threadIdx.x == 0) &&
(threadIdx.y == 0))
{
if ((threadPos2D.x > 0) &&
(threadPos2D.y > 0))
{
_shared[threadIdx.x][threadIdx.y] =
d_sourceImg[myIdTop - 1];
}
else
{
_shared[threadIdx.x][threadIdx.y] =
make_uchar4(
255,
255,
255,
255);
}
}
__syncthreads();
// Top row fetches all top neighbors
if (threadIdx.y == 0)
{
if (threadPos2D.y > 0)
{
_shared[threadIdx.x + 1][threadIdx.y] =
d_sourceImg[myIdTop];
}
else
{
_shared[threadIdx.x + 1][threadIdx.y] =
make_uchar4(
255,
255,
255,
255);
}
}
__syncthreads();
// Top right thread fetches top right neighbor (if available)
if (((threadIdx.x == (blockDim.x - 1)) ||
(threadPos2D.x == (numColsSource - 1))) &&
(threadIdx.y == 0))
{
if ((threadPos2D.x < (numColsSource - 1)) &&
(threadPos2D.y > 0))
{
_shared[threadIdx.x + 2][threadIdx.y] =
d_sourceImg[myIdTop + 1];
}
else
{
_shared[threadIdx.x + 2][threadIdx.y] =
make_uchar4(
255,
255,
255,
255);
}
}
__syncthreads();
// Left column fetches all left neighbors
if ((threadIdx.x == 0) &&
(threadPos2D.y < numRowsSource))
{
if (threadPos2D.x > 0)
{
_shared[threadIdx.x][threadIdx.y + 1] =
d_sourceImg[myIdLeft];
}
else
{
_shared[threadIdx.x][threadIdx.y + 1] =
make_uchar4(
255,
255,
255,
255);
}
}
__syncthreads();
// Fetch all standard elements of the tile
_shared[threadIdx.x + 1][threadIdx.y + 1] =
d_sourceImg[myId];
__syncthreads();
// Right column fetches all right neighbors
if (((threadIdx.x == (blockDim.x - 1)) ||
(threadPos2D.x == (numColsSource - 1))) &&
(threadPos2D.y < numRowsSource))
{
if (threadPos2D.x < (numColsSource - 1))
{
_shared[threadIdx.x + 2][threadIdx.y + 1] =
d_sourceImg[myIdRight];
}
else
{
_shared[threadIdx.x + 2][threadIdx.y + 1] =
make_uchar4(
255,
255,
255,
255);
}
}
__syncthreads();
// Bottom left thread fetches bottom left neighbor (if available)
if ((threadIdx.x == 0) &&
((threadIdx.y == (blockDim.y - 1)) ||
(threadPos2D.y == (numRowsSource - 1))))
{
if ((threadPos2D.x > 0) &&
(threadPos2D.y < (numRowsSource - 1)))
{
_shared[threadIdx.x][threadIdx.y + 2] =
d_sourceImg[myIdBottom - 1];
}
else
{
_shared[threadIdx.x][threadIdx.y + 2] =
make_uchar4(
255,
255,
255,
255);
}
}
__syncthreads();
// Bottom row fetches all bottom neighbors
if ((threadIdx.y == (blockDim.y - 1)) ||
(threadPos2D.y == (numRowsSource - 1)))
{
if (threadPos2D.y < (numRowsSource - 1))
{
_shared[threadIdx.x + 1][threadIdx.y + 2] =
d_sourceImg[myIdBottom];
}
else
{
_shared[threadIdx.x + 1][threadIdx.y + 2] =
make_uchar4(
255,
255,
255,
255);
}
}
__syncthreads();
// Bottom right thread fetches bottom right neighbor (if available)
if (((threadIdx.x == (blockDim.x - 1)) ||
(threadPos2D.x == (numColsSource - 1))) &&
((threadIdx.y == (blockDim.y - 1)) ||
(threadPos2D.y == (numRowsSource - 1))))
{
if ((threadPos2D.x < (numColsSource - 1)) &&
(threadPos2D.y < (numRowsSource - 1)))
{
_shared[threadIdx.x + 2][threadIdx.y + 2] =
d_sourceImg[myIdBottom + 1];
}
else
{
_shared[threadIdx.x + 2][threadIdx.y + 2] =
make_uchar4(
255,
255,
255,
255);
}
}
__syncthreads();
// Determine the outside pixels (set them to 0)
unsigned char _maskVal =
((_shared[threadIdx.x + 1][threadIdx.y + 1].x +
_shared[threadIdx.x + 1][threadIdx.y + 1].y +
_shared[threadIdx.x + 1][threadIdx.y + 1].z) < 3 * 255) ?
1 :
0;
d_mask[myId] = _maskVal;
__syncthreads();
if (_maskVal)
{
uchar4 _topNeighbor =
_shared[threadIdx.x + 1][threadIdx.y];
uchar4 _bottomNeighbor =
_shared[threadIdx.x + 1][threadIdx.y + 2];
uchar4 _leftNeighbor =
_shared[threadIdx.x][threadIdx.y + 1];
uchar4 _rightNeighbor =
_shared[threadIdx.x + 2][threadIdx.y + 1];
bool _topNeighborIn =
(_topNeighbor.x +
_topNeighbor.y +
_topNeighbor.z) < 3 * 255;
bool _bottomNeighborIn =
(_bottomNeighbor.x +
_bottomNeighbor.y +
_bottomNeighbor.z) < 3 * 255;
bool _leftNeighborIn =
(_leftNeighbor.x +
_leftNeighbor.y +
_leftNeighbor.z) < 3 * 255;
bool _rightNeighborIn =
(_rightNeighbor.x +
_rightNeighbor.y +
_rightNeighbor.z) < 3 * 255;
if (_topNeighborIn &&
_bottomNeighborIn &&
_leftNeighborIn &&
_rightNeighborIn)
{
d_border[myId] = 0;
d_interior[myId] = 1;
}
else
{
d_border[myId] = 1;
d_interior[myId] = 0;
}
}
else
{
d_border[myId] = 0;
d_interior[myId] = 0;
}
#if defined (ENABLE_DEBUG)
if ((blockIdx.x == DEBUG_COLLECT_SHARED_DATA_BLOCK_X) &&
(blockIdx.y == DEBUG_COLLECT_SHARED_DATA_BLOCK_Y))
{
int _sharedId =
((threadIdx.y + 1) * (BLOCK_SIZE_CALC_MASK_MAX_X + 2)) +
(threadIdx.x + 1);
d_shared_for_debug[_sharedId] =
_shared[threadIdx.x + 1][threadIdx.y + 1];
// Top left
if ((threadIdx.x == 0) &&
(threadIdx.y == 0))
{
_sharedId =
0;
d_shared_for_debug[_sharedId] =
_shared[threadIdx.x][threadIdx.y];
}
// Top row
if (threadIdx.y == 0)
{
_sharedId =
threadIdx.x + 1;
d_shared_for_debug[_sharedId] =
_shared[threadIdx.x + 1][threadIdx.y];
}
// Top right
if (((threadIdx.x == (blockDim.x - 1)) ||
(threadPos2D.x == (numColsSource - 1))) &&
(threadIdx.y == 0))
{
_sharedId =
threadIdx.x + 2;
d_shared_for_debug[_sharedId] =
_shared[threadIdx.x + 2][threadIdx.y];
}
// Left column
if ((threadIdx.x == 0) &&
(threadPos2D.y < numRowsSource))
{
_sharedId =
(threadIdx.y + 1) * (BLOCK_SIZE_CALC_MASK_MAX_X + 2);
d_shared_for_debug[_sharedId] =
_shared[threadIdx.x][threadIdx.y + 1];
}
// Right column
if (((threadIdx.x == (blockDim.x - 1)) ||
(threadPos2D.x == (numColsSource - 1))) &&
(threadPos2D.y < numRowsSource ))
{
_sharedId =
(threadIdx.y + 1) * (BLOCK_SIZE_CALC_MASK_MAX_X + 2) + threadIdx.x + 2;
d_shared_for_debug[_sharedId] =
_shared[threadIdx.x + 2][threadIdx.y + 1];
}
// Bottom left
if ((threadIdx.x == 0) &&
((threadIdx.y == (blockDim.y - 1)) ||
(threadPos2D.y == (numRowsSource - 1))))
{
_sharedId =
(threadIdx.y + 2) * (BLOCK_SIZE_CALC_MASK_MAX_X + 2);
d_shared_for_debug[_sharedId] =
_shared[threadIdx.x][threadIdx.y + 2];
}
// Bottom row
if ((threadIdx.y == (blockDim.y - 1)) ||
(threadPos2D.y == (numRowsSource - 1)))
{
_sharedId =
(threadIdx.y + 2) * (BLOCK_SIZE_CALC_MASK_MAX_X + 2) + threadIdx.x + 1;
d_shared_for_debug[_sharedId] =
_shared[threadIdx.x + 1][threadIdx.y + 2];
}
// Bottom right
if (((threadIdx.x == (blockDim.x - 1)) ||
(threadPos2D.x == (numColsSource - 1))) &&
((threadIdx.y == (blockDim.y - 1)) ||
(threadPos2D.y == (numRowsSource - 1))))
{
_sharedId =
(threadIdx.y + 2) * (BLOCK_SIZE_CALC_MASK_MAX_X + 2) + threadIdx.x + 2;
d_shared_for_debug[_sharedId] =
_shared[threadIdx.x + 2][threadIdx.y + 2];
}
}
#endif
#else
uchar4 _local =
d_sourceImg[myId];
d_mask[myId] =
((_local.x +
_local.y +
_local.z) < 3 * 255) ?
1 :
0;
#endif
}
}
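// ---------------------------------------------------------------------------
// Editor's note: the Background/Jacobi comment at the top of this file spells
// out one Jacobi iteration but leaves the kernel to the student. The kernel
// below is only a hedged sketch of that single step for one float color
// channel; its name and parameter names are illustrative assumptions, not part
// of the original assignment code. d_interior is expected to hold 1 for
// interior pixels (as produced by calculateMaskKernel above) and 0 elsewhere.
// ---------------------------------------------------------------------------
__global__
void jacobiIterationSketch(
    const unsigned char* const d_interior,  // 1 = interior pixel, 0 = border/outside
    const float* const d_sourceChannel,     // one color channel of the source image
    const float* const d_destChannel,       // same color channel of the destination image
    const float* const d_guessPrev,         // ImageGuess_prev
    float* const d_guessNext,               // ImageGuess_next
    const size_t numRowsSource,
    const size_t numColsSource)
{
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    if ((c >= (int)numColsSource) || (r >= (int)numRowsSource))
    {
        return;
    }
    const int p = r * (int)numColsSource + c;
    // Only interior pixels are updated; everything else just carries its
    // previous value forward so the ping-pong buffers stay defined.
    if (!d_interior[p])
    {
        d_guessNext[p] = d_guessPrev[p];
        return;
    }
    // An interior pixel always has four in-bounds neighbors: a pixel on the
    // image edge can never have all four neighbors inside the mask.
    const int neighbors[4] = { p - (int)numColsSource,   // top
                               p + (int)numColsSource,   // bottom
                               p - 1,                    // left
                               p + 1 };                  // right
    float sum1 = 0.f;   // neighbor guesses (interior) or destination values (border)
    float sum2 = 0.f;   // source gradient term
    for (int i = 0; i < 4; ++i)
    {
        const int n = neighbors[i];
        sum1 += d_interior[n] ? d_guessPrev[n] : d_destChannel[n];
        sum2 += d_sourceChannel[p] - d_sourceChannel[n];
    }
    const float newVal = (sum1 + sum2) / 4.f;
    d_guessNext[p] = fminf(255.f, fmaxf(0.f, newVal));   // clamp to [0, 255]
}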
void your_blend(const uchar4* const h_sourceImg, //IN
const size_t numRowsSource, const size_t numColsSource,
const uchar4* const h_destImg, //IN
uchar4* const h_blendedImg) //OUT
{
/* To Recap here are the steps you need to implement
1) Compute a mask of the pixels from the source image to be copied
     The pixels that shouldn't be copied are completely white: they
have R=255, G=255, B=255. Any other pixels SHOULD be copied.
*/
size_t srcSize = numRowsSource * numColsSource;
#if defined (ENABLE_DEBUG)
unsigned char* h_mask_dbg =
new unsigned char[srcSize];
memset(
h_mask_dbg,
0x0,
srcSize * sizeof(unsigned char));
unsigned char* h_border_dbg =
new unsigned char[srcSize];
memset(
h_border_dbg,
0x0,
srcSize * sizeof(unsigned char));
unsigned char* h_interior_dbg =
new unsigned char[srcSize];
memset(
h_interior_dbg,
0x0,
srcSize * sizeof(unsigned char));
uchar4 *h_shared_for_debug =
new uchar4[(BLOCK_SIZE_CALC_MASK_MAX_X + 2) * (BLOCK_SIZE_CALC_MASK_MAX_Y + 2)];
memset(
h_shared_for_debug,
0x0,
(BLOCK_SIZE_CALC_MASK_MAX_X + 2) * (BLOCK_SIZE_CALC_MASK_MAX_Y + 2) * sizeof(uchar4));
uchar4 *d_shared_for_debug;
        // Allocate memory on the device for retrieving shared data
checkCudaErrors(
cudaMalloc(
&d_shared_for_debug,
(BLOCK_SIZE_CALC_MASK_MAX_X + 2) * (BLOCK_SIZE_CALC_MASK_MAX_Y + 2) * sizeof(uchar4)));
#endif
unsigned char* d_mask;
// Allocate memory on the device for storing the mask data
checkCudaErrors(
cudaMalloc(
&d_mask,
srcSize * sizeof(unsigned char)));
unsigned char* d_border;
// Allocate memory on the device for storing the border data
checkCudaErrors(
cudaMalloc(
&d_border,
srcSize * sizeof(unsigned char)));
unsigned char* d_interior;
// Allocate memory on the device for storing the interior data
checkCudaErrors(
cudaMalloc(
&d_interior,
srcSize * sizeof(unsigned char)));
uchar4* d_sourceImg;
// Allocate memory on the device for storing the source image data
checkCudaErrors(
cudaMalloc(
&d_sourceImg,
srcSize * sizeof(uchar4)));
// Copy source image data to device
checkCudaErrors(
cudaMemcpy(
d_sourceImg,
h_sourceImg,
srcSize * sizeof(uchar4),
cudaMemcpyHostToDevice));
int gridSizeX =
(numColsSource - 1) / BLOCK_SIZE_CALC_MASK_MAX_X + 1;
int gridSizeY =
(numRowsSource - 1) / BLOCK_SIZE_CALC_MASK_MAX_Y + 1;
// Set block size (i.e., number of threads per block)
const dim3 blockSize(
BLOCK_SIZE_CALC_MASK_MAX_X,
BLOCK_SIZE_CALC_MASK_MAX_Y,
1);
// Set grid size (i.e., number of blocks per kernel launch)
const dim3 gridSize(
gridSizeX,
gridSizeY,
1);
calculateMaskKernel<<<gridSize, blockSize>>>(
d_sourceImg,
numRowsSource,
numColsSource,
d_mask,
d_border,
d_interior
#if defined(ENABLE_DEBUG)
, d_shared_for_debug
#endif
);
#if defined (ENABLE_STRICT_ERROR_CHECKING)
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
#endif
#if defined (ENABLE_DEBUG)
// Copy mask data to host (debug)
checkCudaErrors(
cudaMemcpy(
h_mask_dbg,
d_mask,
srcSize * sizeof(unsigned char),
cudaMemcpyDeviceToHost));
// Copy border data to host (debug)
checkCudaErrors(
cudaMemcpy(
h_border_dbg,
d_border,
srcSize * sizeof(unsigned char),
cudaMemcpyDeviceToHost));
// Copy interior data to host (debug)
checkCudaErrors(
cudaMemcpy(
h_interior_dbg,
d_interior,
srcSize * sizeof(unsigned char),
cudaMemcpyDeviceToHost));
// Copy shared data to host (debug)
checkCudaErrors(
cudaMemcpy(
h_shared_for_debug,
d_shared_for_debug,
(BLOCK_SIZE_CALC_MASK_MAX_X + 2) * (BLOCK_SIZE_CALC_MASK_MAX_Y + 2) * sizeof(uchar4),
cudaMemcpyDeviceToHost));
#endif
cudaFree(
d_mask);
cudaFree(
d_border);
cudaFree(
d_interior);
cudaFree(
d_sourceImg);
/*
2) Compute the interior and border regions of the mask. An interior
pixel has all 4 neighbors also inside the mask. A border pixel is
in the mask itself, but has at least one neighbor that isn't.
3) Separate out the incoming image into three separate channels
4) Create two float(!) buffers for each color channel that will
act as our guesses. Initialize them to the respective color
     channel of the source image since that will act as our initial guess.
5) For each color channel perform the Jacobi iteration described
above 800 times.
6) Create the output image by replacing all the interior pixels
in the destination image with the result of the Jacobi iterations.
Just cast the floating point values to unsigned chars since we have
already made sure to clamp them to the correct range.
     Since this is the final assignment we provide little boilerplate code to
     help you; a hedged sketch of the per-channel Jacobi loop is appended after
     this function as an editorial addition. Notice that all the input/output pointers are HOST pointers.
You will have to allocate all of your own GPU memory and perform your own
memcopies to get data in and out of the GPU memory.
Remember to wrap all of your calls with checkCudaErrors() to catch any
thing that might go wrong. After each kernel call do:
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
to catch any errors that happened while executing the kernel.
*/
#if defined (ENABLE_DEBUG)
reference_calc_custom(h_sourceImg, numRowsSource, numColsSource,
h_destImg, h_blendedImg, h_mask_dbg, h_border_dbg, h_interior_dbg);
delete []h_mask_dbg;
delete []h_border_dbg;
delete []h_interior_dbg;
cudaFree(
d_shared_for_debug);
delete []h_shared_for_debug;
#endif
}
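// ---------------------------------------------------------------------------
// Editor's note: a hedged sketch of the per-channel Jacobi loop described in
// steps 4)-5) of the recap comment inside your_blend, wired to the
// jacobiIterationSketch kernel defined earlier in this file. Everything here
// is an editorial addition: the caller is assumed to have allocated and filled
// d_interior (interior mask), d_sourceChannel / d_destChannel (one float color
// channel each) and the two scratch buffers d_bufA / d_bufB, both initialized
// to the source channel. The buffer holding the final guess is returned.
// ---------------------------------------------------------------------------
float* runJacobiForChannelSketch(const unsigned char* const d_interior,
                                 const float* const d_sourceChannel,
                                 const float* const d_destChannel,
                                 float* d_bufA,
                                 float* d_bufB,
                                 const size_t numRowsSource,
                                 const size_t numColsSource)
{
    const dim3 blockSize(
        BLOCK_SIZE_CALC_MASK_MAX_X,
        BLOCK_SIZE_CALC_MASK_MAX_Y,
        1);
    const dim3 gridSize(
        (numColsSource - 1) / BLOCK_SIZE_CALC_MASK_MAX_X + 1,
        (numRowsSource - 1) / BLOCK_SIZE_CALC_MASK_MAX_Y + 1,
        1);
    float* d_prev = d_bufA;
    float* d_next = d_bufB;
    for (int iter = 0; iter < 800; ++iter)
    {
        jacobiIterationSketch<<<gridSize, blockSize>>>(
            d_interior,
            d_sourceChannel,
            d_destChannel,
            d_prev,
            d_next,
            numRowsSource,
            numColsSource);
        // Swap the ping-pong buffers so the next iteration reads what was
        // just written.
        float* tmp = d_prev;
        d_prev = d_next;
        d_next = tmp;
    }
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
    return d_prev;   // after the final swap this holds the last iteration's result
}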
|
0476819e69a7bcedda407f9faced8e58042c8d50.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <assert.h>
//#include <time.h>
#define N 2//(64*64)//(2048*2048)
#define THREADS_PER_BLOCK 1//512
__global__ void Asum(int *a, int *b, int *c){
int index = threadIdx.x + blockIdx.x*blockDim.x;
c[index] = a[index] + b[index];
}
| 0476819e69a7bcedda407f9faced8e58042c8d50.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <assert.h>
//#include <time.h>
#define N 2//(64*64)//(2048*2048)
#define THREADS_PER_BLOCK 1//512
__global__ void Asum(int *a, int *b, int *c){
int index = threadIdx.x + blockIdx.x*blockDim.x;
c[index] = a[index] + b[index];
}
|
ef41f9582bb8dd0b264ab177b4ed72d82ec80b6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../book.h"
#include <iostream>
#include <iomanip>
using std::cout;
using std::endl;
using std::ios;
#define imin(a,b) (a<b?a:b)
const int N = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid =
imin( 32, (N+threadsPerBlock-1) / threadsPerBlock );
__global__ void dot( int *a, int *b, int *c ) {
__shared__ int cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
int temp = 0;
while (tid < N) {
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex] = temp;
// synchronize threads in this block
__syncthreads();
// for reductions, threadsPerBlock must be a power of 2
// because of the following code
int i = blockDim.x/2;
while (i != 0) {
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
c[blockIdx.x] = cache[0];
}
int DotProductOnBook( void ) {
int *a, *b, c, *partial_c;
int *dev_a, *dev_b, *dev_partial_c;
// allocate memory on the cpu side
a = (int*)malloc( N*sizeof(int) );
b = (int*)malloc( N*sizeof(int) );
partial_c = (int*)malloc( blocksPerGrid*sizeof(int) );
// allocate the memory on the GPU
HANDLE_ERROR( hipMalloc( (void**)&dev_a,
N*sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_b,
N*sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_partial_c,
blocksPerGrid*sizeof(int) ) );
// fill in the host memory with data
for (int i=0; i<N; i++) {
a[i] = 1;
b[i] = 1;
}
// copy the arrays 'a' and 'b' to the GPU
HANDLE_ERROR( hipMemcpy( dev_a, a, N*sizeof(int),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b, b, N*sizeof(int),
hipMemcpyHostToDevice ) );
hipLaunchKernelGGL(( dot), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, dev_a, dev_b,
dev_partial_c );
// copy the array 'c' back from the GPU to the CPU
HANDLE_ERROR( hipMemcpy( partial_c, dev_partial_c,
blocksPerGrid*sizeof(int),
hipMemcpyDeviceToHost ) );
// finish up on the CPU side
c = 0;
for (int i=0; i<blocksPerGrid; i++) {
c += partial_c[i];
}
    // With a[i] = b[i] = 1 the expected dot product is simply N.
    cout.setf(ios::fixed);
    cout << "result on GPU is " << c << endl
        << "result on CPU is " << N << endl
        << "The difference is " << c - N << endl;
// free memory on the gpu side
HANDLE_ERROR( hipFree( dev_a ) );
HANDLE_ERROR( hipFree( dev_b ) );
HANDLE_ERROR( hipFree( dev_partial_c ) );
// free memory on the cpu side
free( a );
free( b );
free( partial_c );
    return 0;
}
| ef41f9582bb8dd0b264ab177b4ed72d82ec80b6a.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../book.h"
#include <iostream>
#include <iomanip>
using std::cout;
using std::endl;
using std::ios;
#define imin(a,b) (a<b?a:b)
const int N = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid =
imin( 32, (N+threadsPerBlock-1) / threadsPerBlock );
__global__ void dot( int *a, int *b, int *c ) {
__shared__ int cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
int temp = 0;
while (tid < N) {
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex] = temp;
// synchronize threads in this block
__syncthreads();
// for reductions, threadsPerBlock must be a power of 2
// because of the following code
int i = blockDim.x/2;
while (i != 0) {
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
c[blockIdx.x] = cache[0];
}
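// Editor's note: the helper below is an editorial addition, not part of the
// original book sample. It is a plain host-side reference for what the
// two-stage reduction in dot() computes, handy for sanity-checking the GPU
// result.
long long dotReferenceCPU( const int *a, const int *b, int n ) {
    long long sum = 0;
    for (int i=0; i<n; i++) {
        sum += (long long)a[i] * b[i];
    }
    return sum;
}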
int DotProductOnBook( void ) {
int *a, *b, c, *partial_c;
int *dev_a, *dev_b, *dev_partial_c;
// allocate memory on the cpu side
a = (int*)malloc( N*sizeof(int) );
b = (int*)malloc( N*sizeof(int) );
partial_c = (int*)malloc( blocksPerGrid*sizeof(int) );
// allocate the memory on the GPU
HANDLE_ERROR( cudaMalloc( (void**)&dev_a,
N*sizeof(int) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_b,
N*sizeof(int) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_partial_c,
blocksPerGrid*sizeof(int) ) );
// fill in the host memory with data
for (int i=0; i<N; i++) {
a[i] = 1;
b[i] = 1;
}
// copy the arrays 'a' and 'b' to the GPU
HANDLE_ERROR( cudaMemcpy( dev_a, a, N*sizeof(int),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b, b, N*sizeof(int),
cudaMemcpyHostToDevice ) );
dot<<<blocksPerGrid,threadsPerBlock>>>( dev_a, dev_b,
dev_partial_c );
// copy the array 'c' back from the GPU to the CPU
HANDLE_ERROR( cudaMemcpy( partial_c, dev_partial_c,
blocksPerGrid*sizeof(int),
cudaMemcpyDeviceToHost ) );
// finish up on the CPU side
c = 0;
for (int i=0; i<blocksPerGrid; i++) {
c += partial_c[i];
}
    // With a[i] = b[i] = 1 the expected dot product is simply N.
    cout.setf(ios::fixed);
    cout << "result on GPU is " << c << endl
        << "result on CPU is " << N << endl
        << "The difference is " << c - N << endl;
// free memory on the gpu side
HANDLE_ERROR( cudaFree( dev_a ) );
HANDLE_ERROR( cudaFree( dev_b ) );
HANDLE_ERROR( cudaFree( dev_partial_c ) );
// free memory on the cpu side
free( a );
free( b );
free( partial_c );
    return 0;
}
|
7a8efbb84e9e138706d37905ce0ea6899695ae82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-----------------------------------------------------------------------------
CS 790-058 GPGPU
Final Project (Point Location using GPU)
This file contains the GPU Kernels
by Shawn Brown ([email protected])
-----------------------------------------------------------------------------*/
#ifndef _KD_GPU3_H_
#define _KD_GPU3_H_
/*---------------------------------------------------------
Includes
---------------------------------------------------------*/
#include <stdio.h>
//#include <float.h>
#include "KDTree_GPU.h"
#include "KD_API.h"
/*---------------------------------------------------------
Function Definitions
---------------------------------------------------------*/
/*---------------------------------------------------------
Name: KDTREE_DIST_V3
Desc: Finds Nearest Neighbor in KDTree
for each query point
Notes: Improved Version
Fewer memory accesses
and less stack space required
resulting in more threads being
            able to run in parallel
---------------------------------------------------------*/
__global__ void
KDTREE_DIST_V3
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
float4 * qps, // IN: query points to compute distance for (1D or 2D field)
GPUNode_2D_MED * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from Indices)
int rootIdx, // IN: index of root node in KD Tree
int w // IN: width of 2D query field (# of columns)
)
{
// Local Parameters
GPU_NN_Result best;
unsigned int currIdx, currAxis, currInOut, nextAxis;
float dx, dy;
float diff, diff2;
float diffDist2;
float queryValue, splitValue;
unsigned int stackTop = 0;
__shared__ float4 queryPoints[KD_THREADS_PER_BLOCK];
__shared__ GPUNode_2D_MED currNodes[KD_THREADS_PER_BLOCK];
__shared__ GPU_Search searchStack[KD_STACK_SIZE][KD_THREADS_PER_BLOCK];
unsigned int haveInfo;
const int threadsPerRow = blockDim.x; // Columns (per block)
const int rowsPerBlock = blockDim.y; // Rows (per block)
// Block index
int bx = blockIdx.x; // column in grid
int by = blockIdx.y; // row in grid
// Thread index
int tx = threadIdx.x; // column in block
int ty = threadIdx.y; // row in block
int tidx = (ty*threadsPerRow) + tx;
// Compute Query Index
int currCol = (bx * threadsPerRow) + tx;
int currRow = (by * rowsPerBlock) + ty;
int qidx = currRow * w + currCol;
// Load current Query Point into local (fast) memory
queryPoints[tidx] = qps[qidx];
// Set Initial Guess equal to root node
best.Id = rootIdx;
best.Dist = 3.0e+38F; // Choose A huge Number to start with for Best Distance
//best.cNodes = 0;
// Store root info
haveInfo = 1;
currIdx = rootIdx;
currAxis = 0;
currInOut = 0; // Outside
splitValue = 3.0e+38f; // Use huge value to simulate infinity
nextAxis = 1;
// Load root node into local fast node
//currNodes[tidx] = kdTree[currIdx]
// No longer add to stack top
// Put root search info on stack
//searchStack[stackTop][tidx].nodeFlags = (rootIdx & 0x1FFFFFFF); // | ((currAxis << 29) & 0x60000000); // | ((currInOut << 31) & 0x8000000);;
//searchStack[stackTop][tidx].splitVal = 3.0e+38F;
//stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
if (haveInfo == 0)
{
// Get current Search Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & 0x1FFFFFFFU);
currAxis = (searchStack[stackTop][tidx].nodeFlags & 0x60000000U) >> 29;
currInOut = (searchStack[stackTop][tidx].nodeFlags & 0x80000000U) >> 31;
splitValue = searchStack[stackTop][tidx].splitVal; // Get Split Value of Parent Node
nextAxis = ((currAxis == 0) ? 1 : 0);
// Early Exit Check
if (currInOut == 1) // KD_OUT
{
// Next Line is effectively queryValue = queryPoints[prevAxis];
queryValue = ((currAxis == 0) ? queryPoints[tidx].y : queryPoints[tidx].x);
//splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= best.Dist)
{
// We can do an early exit for this node
continue;
}
}
}
// else
//{
// Already have info from root or traversing onside node
//}
// WARNING - It's much faster to load this node from global memory after the "Early Exit check"
// Load specified current Node from KD Tree
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = ((currAxis == 0) ? queryPoints[tidx].x : queryPoints[tidx].y);
splitValue = ((currAxis == 0) ? currNodes[tidx].pos[0] : currNodes[tidx].pos[1]);
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryPoints[tidx].x;
dy = currNodes[tidx].pos[1] - queryPoints[tidx].y;
diffDist2 = (dx*dx) + (dy*dy);
// Update closest point Idx
//if (diffDist2 < best.Dist)
//{
// best.Id = currIdx;
// best.Dist = diffDist2;
//}
best.Id = ((diffDist2 < best.Dist) ? currIdx : best.Id);
best.Dist = ((diffDist2 < best.Dist) ? diffDist2 : best.Dist);
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < best.Dist)
{
//nextIdx = currNodes[tidx].Right;
if (0xFFFFFFFF != currNodes[tidx].Right) // cInvalid
{
// Push offside search node onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Right & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U) | 0x80000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
//nextIdx = currNodes[tidx].Left;
haveInfo = 0;
if (0xFFFFFFFF != currNodes[tidx].Left)
{
// Push onside search node onto top of stack
//searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Left & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U); // | 0x80000000U;
//searchStack[stackTop][tidx].splitVal = splitValue;
//stackTop++;
// Don't push node onto search stack, just update search info directly
currIdx = currNodes[tidx].Left;
currAxis = nextAxis;
currInOut = 0; // KD_IN
//splitValue = splitValue;
haveInfo = 1;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < best.Dist)
{
// Add to search stack
//nextIdx = currNodes[tidx].Left;
if (0xFFFFFFFFU != currNodes[tidx].Left)
{
// Push offside node onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Left & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U) | 0x80000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
//nextIdx = currNodes[tidx].Right;
if (0xFFFFFFFFU != currNodes[tidx].Right)
{
// Push onside node top of stack
//searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Right & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U); // | 0x8000000U;
//searchStack[stackTop][tidx].splitVal = splitValue;
//stackTop++;
// Don't push node onto search stack, just update the search info directly
currIdx = currNodes[tidx].Right;
currAxis = nextAxis;
currInOut = 0; // KD_IN
//splitValue = splitValue;
haveInfo = 1;
}
}
}
// We now have the Best Index but we really need the best ID so grab it from ID list
best.Id = ids[best.Id];
// Turn Dist2 into true distance
best.Dist = sqrt( best.Dist );
// Store Result
qrs[qidx] = best;
}
#endif // #ifndef _KD_GPU3_H_
| 7a8efbb84e9e138706d37905ce0ea6899695ae82.cu | /*-----------------------------------------------------------------------------
CS 790-058 GPGPU
Final Project (Point Location using GPU)
This file contains the GPU Kernels
by Shawn Brown ([email protected])
-----------------------------------------------------------------------------*/
#ifndef _KD_GPU3_H_
#define _KD_GPU3_H_
/*---------------------------------------------------------
Includes
---------------------------------------------------------*/
#include <stdio.h>
//#include <float.h>
#include "KDTree_GPU.h"
#include "KD_API.h"
/*---------------------------------------------------------
Function Definitions
---------------------------------------------------------*/
/*---------------------------------------------------------
Name: KDTREE_DIST_V3
Desc: Finds Nearest Neighbor in KDTree
for each query point
Notes: Improved Version
Fewer memory accesses
and less stack space required
resulting in more threads being
            able to run in parallel
---------------------------------------------------------*/
__global__ void
KDTREE_DIST_V3
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
float4 * qps, // IN: query points to compute distance for (1D or 2D field)
GPUNode_2D_MED * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from Indices)
int rootIdx, // IN: index of root node in KD Tree
int w // IN: width of 2D query field (# of columns)
)
{
// Local Parameters
GPU_NN_Result best;
unsigned int currIdx, currAxis, currInOut, nextAxis;
float dx, dy;
float diff, diff2;
float diffDist2;
float queryValue, splitValue;
unsigned int stackTop = 0;
__shared__ float4 queryPoints[KD_THREADS_PER_BLOCK];
__shared__ GPUNode_2D_MED currNodes[KD_THREADS_PER_BLOCK];
__shared__ GPU_Search searchStack[KD_STACK_SIZE][KD_THREADS_PER_BLOCK];
unsigned int haveInfo;
const int threadsPerRow = blockDim.x; // Columns (per block)
const int rowsPerBlock = blockDim.y; // Rows (per block)
// Block index
int bx = blockIdx.x; // column in grid
int by = blockIdx.y; // row in grid
// Thread index
int tx = threadIdx.x; // column in block
int ty = threadIdx.y; // row in block
int tidx = (ty*threadsPerRow) + tx;
// Compute Query Index
int currCol = (bx * threadsPerRow) + tx;
int currRow = (by * rowsPerBlock) + ty;
int qidx = currRow * w + currCol;
// Load current Query Point into local (fast) memory
queryPoints[tidx] = qps[qidx];
// Set Initial Guess equal to root node
best.Id = rootIdx;
best.Dist = 3.0e+38F; // Choose A huge Number to start with for Best Distance
//best.cNodes = 0;
// Store root info
haveInfo = 1;
currIdx = rootIdx;
currAxis = 0;
currInOut = 0; // Outside
splitValue = 3.0e+38f; // Use huge value to simulate infinity
nextAxis = 1;
// Load root node into local fast node
//currNodes[tidx] = kdTree[currIdx]
// No longer add to stack top
// Put root search info on stack
//searchStack[stackTop][tidx].nodeFlags = (rootIdx & 0x1FFFFFFF); // | ((currAxis << 29) & 0x60000000); // | ((currInOut << 31) & 0x8000000);;
//searchStack[stackTop][tidx].splitVal = 3.0e+38F;
//stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
if (haveInfo == 0)
{
// Get current Search Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & 0x1FFFFFFFU);
currAxis = (searchStack[stackTop][tidx].nodeFlags & 0x60000000U) >> 29;
currInOut = (searchStack[stackTop][tidx].nodeFlags & 0x80000000U) >> 31;
splitValue = searchStack[stackTop][tidx].splitVal; // Get Split Value of Parent Node
nextAxis = ((currAxis == 0) ? 1 : 0);
// Early Exit Check
if (currInOut == 1) // KD_OUT
{
// Next Line is effectively queryValue = queryPoints[prevAxis];
queryValue = ((currAxis == 0) ? queryPoints[tidx].y : queryPoints[tidx].x);
//splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= best.Dist)
{
// We can do an early exit for this node
continue;
}
}
}
// else
//{
// Already have info from root or traversing onside node
//}
// WARNING - It's much faster to load this node from global memory after the "Early Exit check"
// Load specified current Node from KD Tree
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = ((currAxis == 0) ? queryPoints[tidx].x : queryPoints[tidx].y);
splitValue = ((currAxis == 0) ? currNodes[tidx].pos[0] : currNodes[tidx].pos[1]);
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryPoints[tidx].x;
dy = currNodes[tidx].pos[1] - queryPoints[tidx].y;
diffDist2 = (dx*dx) + (dy*dy);
// Update closest point Idx
//if (diffDist2 < best.Dist)
//{
// best.Id = currIdx;
// best.Dist = diffDist2;
//}
best.Id = ((diffDist2 < best.Dist) ? currIdx : best.Id);
best.Dist = ((diffDist2 < best.Dist) ? diffDist2 : best.Dist);
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < best.Dist)
{
//nextIdx = currNodes[tidx].Right;
if (0xFFFFFFFF != currNodes[tidx].Right) // cInvalid
{
// Push offside search node onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Right & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U) | 0x80000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
//nextIdx = currNodes[tidx].Left;
haveInfo = 0;
if (0xFFFFFFFF != currNodes[tidx].Left)
{
// Push onside search node onto top of stack
//searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Left & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U); // | 0x80000000U;
//searchStack[stackTop][tidx].splitVal = splitValue;
//stackTop++;
// Don't push node onto search stack, just update search info directly
currIdx = currNodes[tidx].Left;
currAxis = nextAxis;
currInOut = 0; // KD_IN
//splitValue = splitValue;
haveInfo = 1;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < best.Dist)
{
// Add to search stack
//nextIdx = currNodes[tidx].Left;
if (0xFFFFFFFFU != currNodes[tidx].Left)
{
// Push offside node onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Left & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U) | 0x80000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
//nextIdx = currNodes[tidx].Right;
if (0xFFFFFFFFU != currNodes[tidx].Right)
{
// Push onside node top of stack
//searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Right & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U); // | 0x8000000U;
//searchStack[stackTop][tidx].splitVal = splitValue;
//stackTop++;
// Don't push node onto search stack, just update the search info directly
currIdx = currNodes[tidx].Right;
currAxis = nextAxis;
currInOut = 0; // KD_IN
//splitValue = splitValue;
haveInfo = 1;
}
}
}
// We now have the Best Index but we really need the best ID so grab it from ID list
best.Id = ids[best.Id];
// Turn Dist2 into true distance
best.Dist = sqrt( best.Dist );
// Store Result
qrs[qidx] = best;
}
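/*---------------------------------------------------------
  Editor's note: hedged host-side launch sketch for
  KDTREE_DIST_V3 above (an editorial addition, not part of
  the original project). Assumptions: the device buffers are
  allocated and populated elsewhere; w and h are exact
  multiples of the block dimensions, because the kernel has
  no bounds guard on its query index; and blockDim.x *
  blockDim.y must not exceed KD_THREADS_PER_BLOCK, since the
  shared arrays are sized by that constant. The original
  project may use a different 2D block shape.
---------------------------------------------------------*/
inline void LaunchKDTreeDistV3Sketch
(
    GPU_NN_Result * d_results,     // OUT: one result per query point
    float4 * d_queryPoints,        // IN: w x h query points
    GPUNode_2D_MED * d_kdTree,     // IN: KD Tree nodes
    unsigned int * d_ids,          // IN: node index -> original point ID
    int rootIdx,                   // IN: index of root node in KD Tree
    int w,                         // IN: columns in the 2D query field
    int h                          // IN: rows in the 2D query field
)
{
    // One row of KD_THREADS_PER_BLOCK threads per block keeps
    // tidx = ty*blockDim.x + tx inside the shared-array bounds.
    dim3 blockSize( KD_THREADS_PER_BLOCK, 1, 1 );
    dim3 gridSize( w / blockSize.x, h / blockSize.y, 1 );
    KDTREE_DIST_V3<<< gridSize, blockSize >>>
    (
        d_results,
        d_queryPoints,
        d_kdTree,
        d_ids,
        rootIdx,
        w
    );
}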
#endif // #ifndef _KD_GPU3_H_
|
d521015ec78fbd3253ff103170f68d991cc86364.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "column_filter.h"
#ifndef OPENCV_TINY_GPU_MODULE
namespace filter
{
template void linearColumn<float3, ushort3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif
#endif /* CUDA_DISABLER */
| d521015ec78fbd3253ff103170f68d991cc86364.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "column_filter.h"
#ifndef OPENCV_TINY_GPU_MODULE
namespace filter
{
template void linearColumn<float3, ushort3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif
#endif /* CUDA_DISABLER */
|
4e8d2fda2916596253f178fbe901c80eda22346f.hip | // !!! This is a file automatically generated by hipify!!!
#include "TimingGPU.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
struct PrivateTimingGPU {
hipEvent_t start;
hipEvent_t stop;
};
// default constructor
TimingGPU::TimingGPU() { privateTimingGPU = new PrivateTimingGPU; }
// default destructor
TimingGPU::~TimingGPU() { delete privateTimingGPU; }
void TimingGPU::StartCounter()
{
hipEventCreate(&((*privateTimingGPU).start));
hipEventCreate(&((*privateTimingGPU).stop));
hipEventRecord((*privateTimingGPU).start,0);
}
void TimingGPU::StartCounterFlags()
{
int eventflags = hipEventBlockingSync;
hipEventCreateWithFlags(&((*privateTimingGPU).start),eventflags);
hipEventCreateWithFlags(&((*privateTimingGPU).stop),eventflags);
hipEventRecord((*privateTimingGPU).start,0);
}
// Gets the counter in ms
float TimingGPU::GetCounter()
{
float time;
hipEventRecord((*privateTimingGPU).stop, 0);
hipEventSynchronize((*privateTimingGPU).stop);
hipEventElapsedTime(&time,(*privateTimingGPU).start,(*privateTimingGPU).stop);
return time;
}
| 4e8d2fda2916596253f178fbe901c80eda22346f.cu | #include "TimingGPU.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
struct PrivateTimingGPU {
cudaEvent_t start;
cudaEvent_t stop;
};
// default constructor
TimingGPU::TimingGPU() { privateTimingGPU = new PrivateTimingGPU; }
// default destructor
TimingGPU::~TimingGPU() { delete privateTimingGPU; }
void TimingGPU::StartCounter()
{
cudaEventCreate(&((*privateTimingGPU).start));
cudaEventCreate(&((*privateTimingGPU).stop));
cudaEventRecord((*privateTimingGPU).start,0);
}
void TimingGPU::StartCounterFlags()
{
int eventflags = cudaEventBlockingSync;
cudaEventCreateWithFlags(&((*privateTimingGPU).start),eventflags);
cudaEventCreateWithFlags(&((*privateTimingGPU).stop),eventflags);
cudaEventRecord((*privateTimingGPU).start,0);
}
// Gets the counter in ms
float TimingGPU::GetCounter()
{
float time;
cudaEventRecord((*privateTimingGPU).stop, 0);
cudaEventSynchronize((*privateTimingGPU).stop);
cudaEventElapsedTime(&time,(*privateTimingGPU).start,(*privateTimingGPU).stop);
return time;
}
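// Editor's note: minimal usage sketch for the timer above (an editorial
// addition, not part of the original file). dummyKernel and timeDummyKernel
// exist only for illustration.
__global__ void dummyKernel() { }

float timeDummyKernel()
{
    TimingGPU timer;
    timer.StartCounter();
    dummyKernel<<<1, 1>>>();
    return timer.GetCounter();   // elapsed milliseconds; GetCounter syncs on the stop event
}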
|
1e5aad3cab34b404ff130b102ac76df8ca6017d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/NativeFunctions.h>
#include <ATen/ExpandUtils.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/Resize.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/Atomic.cuh>
#include <ATen/hip/HIPUtils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/cub.h>
#include <c10/util/irange.h>
#include <c10/core/QScheme.h>
#include <limits>
#include <c10/macros/Macros.h>
namespace {
template <typename scalar_t, int SZ>
__global__ void indexing_backward_kernel(
int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim, bool accumulate) {
//numel is total number of flattened indices, not expanded to dimensions that are not indexed.
//stride is the cumulative size of the not-indexed last dimensions
//stride_before is the stride of the dimension immediately preceding first indexed dimension
//if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case
  //outer_dim is the number of elements in the first unindexed dimensions
using accscalar_t = at::acc_type<scalar_t, true>;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same destination index as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){
int64_t idx = blockIdx.x * blockDim.y + threadIdx.y;
if (idx < numel
&& (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){
do {
int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
// if not accumulate, we only keep the last duplicate index so skip those before it
if (!accumulate && (idx < numel - 1) && sorted_indices[idx] == sorted_indices[idx + 1]) {
idx++;
continue;
}
const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before;
const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride;
const accscalar_t scale = (accscalar_t)1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
while (start_feature < stride) {
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
if (accumulate) {
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
if (accumulate) {
weight[ii] += gradient[ii] * scale;
} else {
weight[ii] = gradient[ii] * scale;
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
start_feature += gridDim.y * blockDim.x * SZ;
}
idx++;
} while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]);
}
}
}
}
namespace at { namespace native {
static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) {
//we don't need to check range in backward - if there were out of bounds indices forward should already have errored out
if (index.numel() != 0 && check_range) {
auto max_idx = index.max().item<int64_t>();
auto min_idx = index.min().item<int64_t>();
if (max_idx >= dim_size) {
TORCH_CHECK_INDEX(false, "index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
}
if (min_idx < -dim_size) {
TORCH_CHECK_INDEX(false, "index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
}
}
return index.remainder(dim_size);
}
static std::vector<int64_t> computeLinearStride(const Tensor & tensor) {
// computes the stride as if tensor were contiguous
auto sizes = tensor.sizes();
std::vector<int64_t> stride(tensor.dim());
stride[tensor.dim() - 1] = 1;
std::partial_sum(sizes.rbegin(), sizes.rend() - 1, stride.rbegin() + 1, std::multiplies<int64_t>());
return stride;
}
static std::tuple<Tensor, int64_t, int64_t, int64_t>
computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) {
auto strides = computeLinearStride(src);
const auto& device = src.options().device();
// Compute the linear index by multiplying the indexing tensors by the
// stride and summing them. All the indexing tensors have the same shape at
// this point. We also compute the number of dimensions before and after that
  // are not being indexed.
Tensor linearIndex;
int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0;
for (const auto i: c10::irange(src.dim())) {
if (indices[i].defined()) {
// Cast index to the longType matching src's device
      // This allows us to support, e.g., indexing a CUDA tensor with a CPU tensor
Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).to(device);
if (linearIndex.defined()) {
linearIndex += index;
} else {
linearIndex = index;
if (i>0) {
strideBefore = src.stride(i-1); // stride after undefined dimensions
}
}
} else if (linearIndex.defined()) {
emptyAfter++;
nElemAfter *= src.size(i);
} else {
emptyBefore++;
nElemBefore *= src.size(i);
}
}
return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter);
}
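// Illustrative example (editorial addition, hypothetical shapes): for a
// contiguous src of shape [A, B, C] indexed only along dim 1, linearIndex is
// index * C (the linear stride of dim 1), nElemBefore = A,
// strideBefore = src.stride(0) = B * C and nElemAfter = C.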
static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, const c10::List<c10::optional<at::Tensor>>& orig, bool check_range) {
checkIndexTensorTypes(orig);
// first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
auto indices = expandTensors(self, orig);
// next broadcast all index tensors together
indices = expand_outplace(indices);
// add missing null Tensors so that it matches self.dim()
while (indices.size() < (size_t)self.dim()) {
indices.emplace_back();
}
// if the non-null indices are not all adjacent, transpose self and indices
// together so that they're adjacent at the front
std::vector<int64_t> inversePerm;
if (!hasContiguousSubspace(indices)) {
std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices);
}
int64_t nElemBefore, strideBefore, nElemAfter;
Tensor linearIndex;
std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range);
return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm);
}
void index_put_with_sort_kernel_thrust_helper(Tensor &linearIndex, Tensor &orig_indices, Tensor &sorted_indices, int64_t num_indices);
namespace {
int64_t largestIndex(const Tensor &self) {
int64_t result = 0;
for (const auto i: c10::irange(self.dim())) {
result += (self.sizes()[i] - 1) * self.strides()[i];
}
return result;
}
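// Illustrative example (editorial addition): for a contiguous tensor of shape
// [2, 3] the strides are [3, 1], so largestIndex = (2-1)*3 + (3-1)*1 = 5,
// i.e. numel() - 1; for non-contiguous tensors it is the largest reachable
// linear offset.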
void index_put_with_sort_kernel(Tensor & self, const c10::List<c10::optional<Tensor>>& indices, const Tensor & value, bool accumulate, bool unsafe) {
if (indices.size() > (size_t)self.dim()) {
TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")");
}
if (!self.is_contiguous()) {
self = self.contiguous();
}
Tensor linearIndex, src, expandedValue = value;
int64_t nElemBefore, strideBefore, sliceSize;
std::vector<int64_t> inversePerm;
std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe);
int64_t num_indices = linearIndex.numel();
if (expandedValue.numel() < num_indices * nElemBefore * sliceSize) {
auto expanded_size = at::DimVector(expandedValue.sizes());
auto size1 = expandedValue.sizes();
auto size2 = linearIndex.sizes();
if (are_expandable(size1, size2)) {
expanded_size = infer_size_dimvector(size1, size2);
}
if (nElemBefore > 1) {
expanded_size.insert(expanded_size.begin(), nElemBefore);
}
expandedValue = expandedValue.expand(expanded_size);
}
expandedValue = expandedValue.contiguous();
if (num_indices > 0 && sliceSize > 0) {
const bool permuted = !src.is_contiguous();
auto src_ = permuted ? src.contiguous() : src;
linearIndex = linearIndex.reshape(-1);
auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
linearIndex.divide_(sliceSize, "trunc");
      // cub on CUDA <= 11.2 has a bug where, for small sizes,
      // cub's sort can be much slower than thrust's merge sort.
      // This bug is fixed in CUDA 11.3.
#if (defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION < 11030) || defined(USE_ROCM)
if (num_indices < 50000) {
index_put_with_sort_kernel_thrust_helper(linearIndex, orig_indices, sorted_indices, num_indices);
} else
#endif
{
// Sort the inputs into sorted with the corresponding indices
auto range = at::arange(num_indices, linearIndex.options());
// linearIndex can not be negative, and we take advantage of this
// fact to sort on less bits for better performance.
int64_t nbits = cuda::cub::get_num_bits(largestIndex(self) / sliceSize);
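      // Editorial note (illustrative, hypothetical values): if
      // largestIndex(self) / sliceSize is 999, get_num_bits returns 10
      // (2^10 = 1024 > 999), so the radix sort below only has to compare the
      // low 10 bits of each 64-bit key instead of all 64.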
cuda::cub::radix_sort_pairs(
linearIndex.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(),
range.data_ptr<int64_t>(), orig_indices.data_ptr<int64_t>(),
num_indices, false, 0, nbits);
}
TORCH_INTERNAL_ASSERT(
linearIndex.numel()*sliceSize*nElemBefore == expandedValue.numel(),
"number of flattened indices did not match number of elements in the value tensor: ",
linearIndex.numel()*sliceSize*nElemBefore, " vs ", expandedValue.numel());
const int UNROLL = 4;
const int indices_per_block = 4;
dim3 grid(ceil_div(num_indices, (int64_t) indices_per_block),
std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], ceil_div(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))),
::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2]));
dim3 block(C10_WARP_SIZE, indices_per_block);
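      // Editorial note on the launch geometry (illustrative): each block owns
      // indices_per_block (= 4) sorted indices via threadIdx.y, each warp
      // (blockDim.x lanes) strides across the slice in chunks of
      // C10_WARP_SIZE * UNROLL elements using blockIdx.y, and blockIdx.z
      // walks the leading un-indexed dimensions (nElemBefore).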
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
expandedValue.scalar_type(), "indexing_backward", [&] {
hipLaunchKernelGGL(( indexing_backward_kernel<scalar_t, UNROLL>), dim3(grid), dim3(block), 0, stream,
sorted_indices.data_ptr<int64_t>(),
orig_indices.data_ptr<int64_t>(),
expandedValue.data_ptr<scalar_t>(),
src_.data_ptr<scalar_t>(),
num_indices,
sliceSize,
strideBefore,
nElemBefore,
accumulate);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
if (permuted) {
self.copy_(src_.permute(inversePerm));
}
}
}
REGISTER_CUDA_DISPATCH(index_put_with_sort_stub, &index_put_with_sort_kernel);
} //anonymous
// Check tensor dimensions for index operations, and return the slice size.
static ptrdiff_t getSliceSize(const Tensor & dst,
int dim,
const Tensor & index,
const Tensor & src)
{
const auto dstDims = dst.dim();
const auto srcDims = src.dim();
TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar");
ptrdiff_t dstSliceSize = 1;
TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds");
for (const auto d: c10::irange(dstDims)) {
if (d != dim) {
dstSliceSize *= dst.size(d);
}
}
TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds");
  TORCH_CHECK(index.numel() == src.size(dim),
              "number of indices must equal src.size(dim)");
ptrdiff_t srcSliceSize = 1;
bool mismatch = false;
if (dstDims != srcDims) mismatch = true;
for (const auto d: c10::irange(srcDims)) {
if (d != dim) {
srcSliceSize *= src.size(d);
if (!mismatch && dst.size(d) != src.size(d)) mismatch = true;
}
}
  TORCH_CHECK(dstSliceSize == srcSliceSize,
              "Source/destination tensors have different slice sizes (",
              dstSliceSize, " vs ", srcSliceSize, ")");
if (mismatch) {
    TORCH_WARN_ONCE(
        "source/destination slices have the same size but different "
        "shape for an index operation. This behavior is deprecated.\n");
}
return dstSliceSize;
}
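// Illustrative example (editorial addition, hypothetical shapes): for dst of
// shape [5, 4, 3], dim = 1, src of shape [5, 2, 3] and an index with 2
// elements, both slice sizes are 5 * 3 = 15 and getSliceSize returns 15.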
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType innerSize,
int64_t dstAddDimSize,
T alpha) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
    // Look up the destination slice for this source slice via the index tensor
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcAddDim];
gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstAddDimSize,
T alpha) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
    // Look up the destination slice for this source slice via the index tensor
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcAddDim];
gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
}
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
template <typename scalar_t>
bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info,
int sliceDim)
{
// The stride between adjacent slices (e.g., between element #0 of slice #100
// and element #0 of slice #101).
unsigned int sliceStride = info.strides[sliceDim];
for (const auto i: c10::irange(info.dims)) {
if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) {
return true;
}
}
return false;
}
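// Illustrative example (editorial addition): for a contiguous 2-D tensor of
// shape [N, M] (strides [M, 1], N, M > 1), sliceDim == 0 gives
// sliceStride = M and the inner dimension has stride 1 < M, so this returns
// true (index-major); sliceDim == 1 gives sliceStride = 1, no smaller stride
// exists, and it returns false.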
void index_add_cuda_impl(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& source, const Scalar& alpha, const Tensor& result) {
if (!result.is_same(self)) result.copy_(self);
// Scalars are treated as 1-d tensor
Tensor self_ = (result.dim() == 0) ? result.view(1) : result;
Tensor source_ = (source.dim() == 0) ? source.view(1) : source;
TORCH_CHECK(result.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims");
TORCH_CHECK(source.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims" );
TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims");
if (globalContext().deterministicAlgorithms()){
torch::List<c10::optional<Tensor>> indices;
indices.reserve(dim + 1);
for (const auto i: c10::irange(dim)) {
indices.emplace_back();
}
indices.emplace_back(index.to(at::kLong));
result.index_put_(indices, source * alpha, true);
return;
}
// The `source` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
  // -the number of indices we are choosing, which is the total size
// of the tensor `index`.
ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_);
ptrdiff_t sourceTotalSize = source.numel();
int64_t selfAddDimSize = self_.size(dim);
ptrdiff_t numIndex = index.numel();
if (sliceSize == 0) {
return;
}
const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
bool indContig = index.is_contiguous();
int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sliceSize, selfAddDimSize, alpha_value); \
C10_HIP_KERNEL_LAUNCH_CHECK();
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexAddLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sourceTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndex, \
selfAddDimSize, alpha_value); \
C10_HIP_KERNEL_LAUNCH_CHECK();
dim3 smallIndexGrid(::min(ceil_div(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(ceil_div(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(sourceTotalSize, (ptrdiff_t)128));
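  // Editorial note (illustrative, hypothetical values): the heuristic caps
  // blocks at 128 threads and the grid at 8 blocks per SM. With
  // sliceSize = 1000 and mpc = 80, smallIndexGrid = min(ceil(1000/128), 640) = 8
  // blocks of 128 threads each.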
if (cuda::detail::canUse32BitIndexMath(result) &&
cuda::detail::canUse32BitIndexMath(source) &&
cuda::detail::canUse32BitIndexMath(index)) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, result.scalar_type(), "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
auto alpha_value = alpha.to<scalar_t>();
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
auto sourceInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
auto indexInfo =
cuda::detail::getTensorInfo<index_t, unsigned int>(index);
indexInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// index to choose
if (numIndex <= 16) {
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim);
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
}
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
}
}
});
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
auto alpha_value = alpha.to<scalar_t>();
cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
cuda::detail::TensorInfo<index_t, uint64_t> indexInfo =
cuda::detail::getTensorInfo<index_t, uint64_t>(index);
indexInfo.collapseDims();
LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
});
});
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
TORCH_IMPL_FUNC(index_add_cuda_out)
(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& source, const Scalar& alpha, const Tensor& result) {
index_add_cuda_impl(self, dim, index, source, alpha, result);
}
namespace {
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType innerSize,
int64_t srcSelectDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
IndexType srcIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexSelectLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType totalSize,
IndexType innerSize,
int64_t srcSelectDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
IndexType srcIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
namespace {
// When using a 0-dim scalar tensor, we need the legacy (THC) semantics of
// TensorInfo: Pretend that the scalar tensor is in fact a one-element vector.
template <typename T, typename IndexType>
cuda::detail::TensorInfo<T, IndexType>
tensorInfoLegacyIfScalar(cuda::detail::TensorInfo<T, IndexType> ti) {
if (ti.dims == 0) {
ti.dims = 1;
ti.sizes[0] = 1;
ti.strides[0] = 1;
}
return ti;
}
}
template <typename scalar_t>
void index_select_out_cuda_impl(
Tensor& out,
const Tensor& self,
long dim,
const Tensor& index) {
ptrdiff_t numIndices = index.numel();
int selfDims = self.dim() == 0 ? 1 : self.dim();
const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
TORCH_CHECK(
index.dim() <= 1, "Index is supposed to be an empty tensor or a vector");
TORCH_CHECK(dim < selfDims, "Indexing dim is out of bounds");
std::vector<int64_t> newSize = self.sizes().vec();
if (self.dim() > 0) {
newSize[dim] = numIndices;
}
if (self.is_quantized()){
out = at::empty_quantized(newSize, out);
} else {
at::native::resize_output(out, newSize);
}
ptrdiff_t outTotalSize = out.numel();
if (outTotalSize == 0) {
return;
}
bool indContig = index.is_contiguous();
// The `self` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
int64_t selfSelectDimSize = self.dim() == 0 ? 1 : self.size(dim);
ptrdiff_t sliceSize = outTotalSize / numIndices;
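  // Illustrative example (editorial addition, hypothetical shapes): for self
  // of shape [4, 5], dim = 0 and an index with 3 elements, out has shape
  // [3, 5], outTotalSize = 15 and sliceSize = 15 / 3 = 5, the size of one
  // selected row.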
int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexSelectSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
outInfo, selfInfo, indicesInfo, \
outSelectDim, selfSelectDim, static_cast<TYPE>(sliceSize), \
selfSelectDimSize); \
C10_HIP_KERNEL_LAUNCH_CHECK();
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexSelectLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
outInfo, selfInfo, indicesInfo, \
outSelectDim, selfSelectDim, static_cast<TYPE>(outTotalSize), \
static_cast<TYPE>((IDX_IS_MAJOR) ? sliceSize : numIndices), \
selfSelectDimSize); \
C10_HIP_KERNEL_LAUNCH_CHECK();
dim3 smallIndexGrid(::min(ceil_div(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(ceil_div(outTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(outTotalSize, (ptrdiff_t)128));
if (cuda::detail::canUse32BitIndexMath(out) &&
cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(index)) {
auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(out));
int outSelectDim = outInfo.collapseDims(dim);
outInfo.reduceDim(outSelectDim);
auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(self));
int selfSelectDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfSelectDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, unsigned int>(index));
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
} else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
} else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = indexShouldBeMajor(outInfo, outSelectDim);
if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
} else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
}
} else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
}
}
});
} else {
auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(out));
int outSelectDim = outInfo.collapseDims(dim);
outInfo.reduceDim(outSelectDim);
auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(self));
int selfSelectDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfSelectDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, uint64_t>(index));
indicesInfo.collapseDims();
LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
});
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
} // anonymous namespace
Tensor& index_select_out_cuda(
const Tensor& self,
int64_t dim,
const Tensor& index,
Tensor& out) {
static constexpr string_view DIM_WARNING =
"Tensor too large or too many (> 25) dimensions";
TORCH_CHECK(
at::cuda::check_device({out, self, index}),
"Input, output and indices must be on the current device");
at::assert_no_internal_overlap(out);
at::assert_no_overlap(out, self);
at::assert_no_overlap(out, index);
dim = at::maybe_wrap_dim(dim, self);
TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
if (self.is_quantized()){
TORCH_CHECK(
self.qscheme() == kPerTensorAffine,
"Only per_tensor quantized quantized tensors are supported by index_select.")
AT_DISPATCH_QINT_TYPES(out.scalar_type(), "index_select_quant_cuda", [&] {
index_select_out_cuda_impl<scalar_t>(out, self, dim, index);
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half,
at::ScalarType::Bool,
at::ScalarType::BFloat16,
out.scalar_type(),
"index_select_cuda",
[&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); });
}
return out;
}
Tensor index_select_cuda(const Tensor& self, int64_t dim, const Tensor& index) {
Tensor out;
if (self.is_quantized()){
TORCH_CHECK(
self.qscheme() == kPerTensorAffine,
"Only per_tensor quantized quantized tensors are supported by index_select.")
out = at::empty_quantized({0}, self);
} else {
out = at::empty({0}, self.options());
}
at::native::index_select_out_cuda(self, dim, index, out);
return out;
}
namespace {
template <typename mask_t>
void masked_fill_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool, kHalf, kBFloat16, iter.common_dtype(), "masked_fill_", [&]() {
const auto value_ = value.to<scalar_t>();
gpu_kernel(
iter, [value_] GPU_LAMBDA(scalar_t self, mask_t mask) -> scalar_t {
if (mask) {
return value_;
}
return self;
});
});
}
} // anonymous namespace
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Scalar& value) {
TORCH_CHECK(self.device() == mask.device(), "expected self and mask to be on the same device, but got mask on ",
mask.device(), " and self on ", self.device());
TORCH_CHECK(mask.scalar_type() == kByte || mask.scalar_type() == kBool,
"expected mask dtype to be Bool but got ", mask.scalar_type());
auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_");
if (at::has_internal_overlap(self) == MemOverlap::YES) {
TORCH_WARN(
"Use of masked_fill_ on expanded tensors is deprecated. "
"Please clone() the tensor before performing this operation. "
"This also applies to advanced indexing e.g. tensor[mask] = scalar");
}
at::assert_no_partial_overlap(self, mask);
c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_fill_");
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self)
.add_input(self)
.add_input(*b_mask)
.build();
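  // Editorial note: the iterator writes into `self` in place (the output and
  // the first input alias), the broadcast mask is the second input,
  // check_all_same_dtype(false) is needed because the mask dtype differs from
  // self, and mem-overlap checking is disabled here because it was already
  // handled explicitly above.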
if (b_mask->dtype() == at::ScalarType::Byte) {
TORCH_WARN("masked_fill_ received a mask with dtype torch.uint8, this behavior is now deprecated," \
"please use a mask with dtype torch.bool instead.");
masked_fill_kernel<uint8_t>(iter, value);
} else {
masked_fill_kernel<bool>(iter, value);
}
namedinference::propagate_names_if_nonempty(self, maybe_outnames);
return self;
}
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Tensor & value) {
TORCH_CHECK(value.dim() == 0, "masked_fill_ only supports a 0-dimensional value tensor, but got tensor "
"with ", value.dim(), " dimension(s).");
return masked_fill__cuda(self, mask, value.item());
}
} // native
} // at
| 1e5aad3cab34b404ff130b102ac76df8ca6017d6.cu | #include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/NativeFunctions.h>
#include <ATen/ExpandUtils.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/Resize.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/cub.h>
#include <c10/util/irange.h>
#include <c10/core/QScheme.h>
#include <limits>
#include <c10/macros/Macros.h>
namespace {
template <typename scalar_t, int SZ>
__global__ void indexing_backward_kernel(
int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim, bool accumulate) {
//numel is total number of flattened indices, not expanded to dimensions that are not indexed.
//stride is the cumulative size of the not-indexed last dimensions
//stride_before is the stride of the dimension immediately preceding first indexed dimension
//if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case
//outer_dim is number of elements in the first unindexed dimensions
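  // Illustrative example (editorial addition, hypothetical shapes): if the
  // tensor being indexed has shape [B, N, D] and dim 1 was indexed with K
  // flattened indices, then numel = K, stride = D, stride_before = N * D and
  // outer_dim = B.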
using accscalar_t = at::acc_type<scalar_t, true>;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same destination index as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
  // SZ is the number of values each thread processes per iteration (grain size).
for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){
int64_t idx = blockIdx.x * blockDim.y + threadIdx.y;
if (idx < numel
&& (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){
do {
int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
// if not accumulate, we only keep the last duplicate index so skip those before it
if (!accumulate && (idx < numel - 1) && sorted_indices[idx] == sorted_indices[idx + 1]) {
idx++;
continue;
}
const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before;
const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride;
const accscalar_t scale = (accscalar_t)1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
while (start_feature < stride) {
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
if (accumulate) {
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
if (accumulate) {
weight[ii] += gradient[ii] * scale;
} else {
weight[ii] = gradient[ii] * scale;
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
start_feature += gridDim.y * blockDim.x * SZ;
}
idx++;
} while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]);
}
}
}
}
namespace at { namespace native {
static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) {
//we don't need to check range in backward - if there were out of bounds indices forward should already have errored out
if (index.numel() != 0 && check_range) {
auto max_idx = index.max().item<int64_t>();
auto min_idx = index.min().item<int64_t>();
if (max_idx >= dim_size) {
TORCH_CHECK_INDEX(false, "index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
}
if (min_idx < -dim_size) {
TORCH_CHECK_INDEX(false, "index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
}
}
return index.remainder(dim_size);
}
static std::vector<int64_t> computeLinearStride(const Tensor & tensor) {
// computes the stride as if tensor were contiguous
auto sizes = tensor.sizes();
std::vector<int64_t> stride(tensor.dim());
stride[tensor.dim() - 1] = 1;
std::partial_sum(sizes.rbegin(), sizes.rend() - 1, stride.rbegin() + 1, std::multiplies<int64_t>());
return stride;
}
static std::tuple<Tensor, int64_t, int64_t, int64_t>
computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) {
auto strides = computeLinearStride(src);
const auto& device = src.options().device();
// Compute the linear index by multiplying the indexing tensors by the
// stride and summing them. All the indexing tensors have the same shape at
// this point. We also compute the number of dimensions before and after that
  // are not being indexed.
Tensor linearIndex;
int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0;
for (const auto i: c10::irange(src.dim())) {
if (indices[i].defined()) {
// Cast index to the longType matching src's device
      // This allows us to support, e.g., indexing a CUDA tensor with a CPU tensor
Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).to(device);
if (linearIndex.defined()) {
linearIndex += index;
} else {
linearIndex = index;
if (i>0) {
strideBefore = src.stride(i-1); // stride after undefined dimensions
}
}
} else if (linearIndex.defined()) {
emptyAfter++;
nElemAfter *= src.size(i);
} else {
emptyBefore++;
nElemBefore *= src.size(i);
}
}
return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter);
}
static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, const c10::List<c10::optional<at::Tensor>>& orig, bool check_range) {
checkIndexTensorTypes(orig);
// first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
auto indices = expandTensors(self, orig);
// next broadcast all index tensors together
indices = expand_outplace(indices);
// add missing null Tensors so that it matches self.dim()
while (indices.size() < (size_t)self.dim()) {
indices.emplace_back();
}
// if the non-null indices are not all adjacent, transpose self and indices
// together so that they're adjacent at the front
std::vector<int64_t> inversePerm;
if (!hasContiguousSubspace(indices)) {
std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices);
}
int64_t nElemBefore, strideBefore, nElemAfter;
Tensor linearIndex;
std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range);
return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm);
}
void index_put_with_sort_kernel_thrust_helper(Tensor &linearIndex, Tensor &orig_indices, Tensor &sorted_indices, int64_t num_indices);
namespace {
int64_t largestIndex(const Tensor &self) {
int64_t result = 0;
for (const auto i: c10::irange(self.dim())) {
result += (self.sizes()[i] - 1) * self.strides()[i];
}
return result;
}
void index_put_with_sort_kernel(Tensor & self, const c10::List<c10::optional<Tensor>>& indices, const Tensor & value, bool accumulate, bool unsafe) {
if (indices.size() > (size_t)self.dim()) {
TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")");
}
if (!self.is_contiguous()) {
self = self.contiguous();
}
Tensor linearIndex, src, expandedValue = value;
int64_t nElemBefore, strideBefore, sliceSize;
std::vector<int64_t> inversePerm;
std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe);
int64_t num_indices = linearIndex.numel();
if (expandedValue.numel() < num_indices * nElemBefore * sliceSize) {
auto expanded_size = at::DimVector(expandedValue.sizes());
auto size1 = expandedValue.sizes();
auto size2 = linearIndex.sizes();
if (are_expandable(size1, size2)) {
expanded_size = infer_size_dimvector(size1, size2);
}
if (nElemBefore > 1) {
expanded_size.insert(expanded_size.begin(), nElemBefore);
}
expandedValue = expandedValue.expand(expanded_size);
}
expandedValue = expandedValue.contiguous();
if (num_indices > 0 && sliceSize > 0) {
const bool permuted = !src.is_contiguous();
auto src_ = permuted ? src.contiguous() : src;
linearIndex = linearIndex.reshape(-1);
auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
linearIndex.divide_(sliceSize, "trunc");
      // cub on CUDA <= 11.2 has a bug where, for small sizes,
      // cub's sort can be much slower than thrust's merge sort.
      // This bug is fixed in CUDA 11.3.
#if (defined(CUDA_VERSION) && CUDA_VERSION < 11030) || defined(USE_ROCM)
if (num_indices < 50000) {
index_put_with_sort_kernel_thrust_helper(linearIndex, orig_indices, sorted_indices, num_indices);
} else
#endif
{
// Sort the inputs into sorted with the corresponding indices
auto range = at::arange(num_indices, linearIndex.options());
// linearIndex can not be negative, and we take advantage of this
// fact to sort on less bits for better performance.
int64_t nbits = cuda::cub::get_num_bits(largestIndex(self) / sliceSize);
cuda::cub::radix_sort_pairs(
linearIndex.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(),
range.data_ptr<int64_t>(), orig_indices.data_ptr<int64_t>(),
num_indices, false, 0, nbits);
}
TORCH_INTERNAL_ASSERT(
linearIndex.numel()*sliceSize*nElemBefore == expandedValue.numel(),
"number of flattened indices did not match number of elements in the value tensor: ",
linearIndex.numel()*sliceSize*nElemBefore, " vs ", expandedValue.numel());
const int UNROLL = 4;
const int indices_per_block = 4;
dim3 grid(ceil_div(num_indices, (int64_t) indices_per_block),
std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], ceil_div(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))),
std::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2]));
dim3 block(C10_WARP_SIZE, indices_per_block);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
expandedValue.scalar_type(), "indexing_backward", [&] {
indexing_backward_kernel<scalar_t, UNROLL><<<grid, block, 0, stream>>>(
sorted_indices.data_ptr<int64_t>(),
orig_indices.data_ptr<int64_t>(),
expandedValue.data_ptr<scalar_t>(),
src_.data_ptr<scalar_t>(),
num_indices,
sliceSize,
strideBefore,
nElemBefore,
accumulate);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
if (permuted) {
self.copy_(src_.permute(inversePerm));
}
}
}
REGISTER_CUDA_DISPATCH(index_put_with_sort_stub, &index_put_with_sort_kernel);
} //anonymous
// Check tensor dimensions for index operations, and return the slice size.
static ptrdiff_t getSliceSize(const Tensor & dst,
int dim,
const Tensor & index,
const Tensor & src)
{
const auto dstDims = dst.dim();
const auto srcDims = src.dim();
TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar");
ptrdiff_t dstSliceSize = 1;
TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds");
for (const auto d: c10::irange(dstDims)) {
if (d != dim) {
dstSliceSize *= dst.size(d);
}
}
TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds");
  TORCH_CHECK(index.numel() == src.size(dim),
              "number of indices must equal src.size(dim)");
ptrdiff_t srcSliceSize = 1;
bool mismatch = false;
if (dstDims != srcDims) mismatch = true;
for (const auto d: c10::irange(srcDims)) {
if (d != dim) {
srcSliceSize *= src.size(d);
if (!mismatch && dst.size(d) != src.size(d)) mismatch = true;
}
}
  TORCH_CHECK(dstSliceSize == srcSliceSize,
              "Source/destination tensors have different slice sizes (",
              dstSliceSize, " vs ", srcSliceSize, ")");
if (mismatch) {
    TORCH_WARN_ONCE(
        "source/destination slices have the same size but different "
        "shape for an index operation. This behavior is deprecated.\n");
}
return dstSliceSize;
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType innerSize,
int64_t dstAddDimSize,
T alpha) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
    // Look up the destination slice for this source slice via the index tensor
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcAddDim];
gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstAddDimSize,
T alpha) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
    // Look up the destination slice for this source slice via the index tensor
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcAddDim];
gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
}
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
template <typename scalar_t>
bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info,
int sliceDim)
{
// The stride between adjacent slices (e.g., between element #0 of slice #100
// and element #0 of slice #101).
unsigned int sliceStride = info.strides[sliceDim];
for (const auto i: c10::irange(info.dims)) {
if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) {
return true;
}
}
return false;
}
void index_add_cuda_impl(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& source, const Scalar& alpha, const Tensor& result) {
if (!result.is_same(self)) result.copy_(self);
// Scalars are treated as 1-d tensor
Tensor self_ = (result.dim() == 0) ? result.view(1) : result;
Tensor source_ = (source.dim() == 0) ? source.view(1) : source;
TORCH_CHECK(result.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims");
TORCH_CHECK(source.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims" );
TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims");
if (globalContext().deterministicAlgorithms()){
torch::List<c10::optional<Tensor>> indices;
indices.reserve(dim + 1);
for (const auto i: c10::irange(dim)) {
indices.emplace_back();
}
indices.emplace_back(index.to(at::kLong));
result.index_put_(indices, source * alpha, true);
return;
}
// The `source` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
  // -the number of indices we are choosing, which is the total size
// of the tensor `index`.
ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_);
ptrdiff_t sourceTotalSize = source.numel();
int64_t selfAddDimSize = self_.size(dim);
ptrdiff_t numIndex = index.numel();
if (sliceSize == 0) {
return;
}
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
bool indContig = index.is_contiguous();
int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM) \
indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sliceSize, selfAddDimSize, alpha_value); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexAddLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sourceTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndex, \
selfAddDimSize, alpha_value); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
dim3 smallIndexGrid(std::min(ceil_div(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(ceil_div(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(sourceTotalSize, (ptrdiff_t)128));
if (cuda::detail::canUse32BitIndexMath(result) &&
cuda::detail::canUse32BitIndexMath(source) &&
cuda::detail::canUse32BitIndexMath(index)) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, result.scalar_type(), "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
auto alpha_value = alpha.to<scalar_t>();
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
auto sourceInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
auto indexInfo =
cuda::detail::getTensorInfo<index_t, unsigned int>(index);
indexInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// index to choose
if (numIndex <= 16) {
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim);
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
}
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
}
}
});
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
auto alpha_value = alpha.to<scalar_t>();
cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
cuda::detail::TensorInfo<index_t, uint64_t> indexInfo =
cuda::detail::getTensorInfo<index_t, uint64_t>(index);
indexInfo.collapseDims();
LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
});
});
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
TORCH_IMPL_FUNC(index_add_cuda_out)
(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& source, const Scalar& alpha, const Tensor& result) {
index_add_cuda_impl(self, dim, index, source, alpha, result);
}
namespace {
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType innerSize,
int64_t srcSelectDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
IndexType srcIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexSelectLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType totalSize,
IndexType innerSize,
int64_t srcSelectDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
IndexType srcIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
namespace {
// When using a 0-dim scalar tensor, we need the legacy (THC) semantics of
// TensorInfo: Pretend that the scalar tensor is in fact a one-element vector.
template <typename T, typename IndexType>
cuda::detail::TensorInfo<T, IndexType>
tensorInfoLegacyIfScalar(cuda::detail::TensorInfo<T, IndexType> ti) {
if (ti.dims == 0) {
ti.dims = 1;
ti.sizes[0] = 1;
ti.strides[0] = 1;
}
return ti;
}
}
template <typename scalar_t>
void index_select_out_cuda_impl(
Tensor& out,
const Tensor& self,
long dim,
const Tensor& index) {
ptrdiff_t numIndices = index.numel();
int selfDims = self.dim() == 0 ? 1 : self.dim();
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
TORCH_CHECK(
index.dim() <= 1, "Index is supposed to be an empty tensor or a vector");
TORCH_CHECK(dim < selfDims, "Indexing dim is out of bounds");
std::vector<int64_t> newSize = self.sizes().vec();
if (self.dim() > 0) {
newSize[dim] = numIndices;
}
if (self.is_quantized()){
out = at::empty_quantized(newSize, out);
} else {
at::native::resize_output(out, newSize);
}
ptrdiff_t outTotalSize = out.numel();
if (outTotalSize == 0) {
return;
}
bool indContig = index.is_contiguous();
// The `self` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
int64_t selfSelectDimSize = self.dim() == 0 ? 1 : self.size(dim);
ptrdiff_t sliceSize = outTotalSize / numIndices;
int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexSelectSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
outInfo, selfInfo, indicesInfo, \
outSelectDim, selfSelectDim, static_cast<TYPE>(sliceSize), \
selfSelectDimSize); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexSelectLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
outInfo, selfInfo, indicesInfo, \
outSelectDim, selfSelectDim, static_cast<TYPE>(outTotalSize), \
static_cast<TYPE>((IDX_IS_MAJOR) ? sliceSize : numIndices), \
selfSelectDimSize); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
dim3 smallIndexGrid(std::min(ceil_div(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(ceil_div(outTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(outTotalSize, (ptrdiff_t)128));
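// Illustrative sizing note (numbers are hypothetical, not from the original source):
// with sliceSize = 1000 and mpc = 80, smallIndexGrid becomes
// min(ceil_div(1000, 128), 80 * 8) = min(8, 640) = 8 blocks of
// min(1000, 128) = 128 threads each; the mpc * 8 cap keeps the grid
// proportional to the number of multiprocessors.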
if (cuda::detail::canUse32BitIndexMath(out) &&
cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(index)) {
auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(out));
int outSelectDim = outInfo.collapseDims(dim);
outInfo.reduceDim(outSelectDim);
auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(self));
int selfSelectDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfSelectDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, unsigned int>(index));
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
} else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
} else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = indexShouldBeMajor(outInfo, outSelectDim);
if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
} else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
}
} else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
}
}
});
} else {
auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(out));
int outSelectDim = outInfo.collapseDims(dim);
outInfo.reduceDim(outSelectDim);
auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(self));
int selfSelectDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfSelectDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, uint64_t>(index));
indicesInfo.collapseDims();
LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
});
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
} // anonymous namespace
Tensor& index_select_out_cuda(
const Tensor& self,
int64_t dim,
const Tensor& index,
Tensor& out) {
static constexpr string_view DIM_WARNING =
"Tensor too large or too many (> 25) dimensions";
TORCH_CHECK(
at::cuda::check_device({out, self, index}),
"Input, output and indices must be on the current device");
at::assert_no_internal_overlap(out);
at::assert_no_overlap(out, self);
at::assert_no_overlap(out, index);
dim = at::maybe_wrap_dim(dim, self);
TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
if (self.is_quantized()){
TORCH_CHECK(
self.qscheme() == kPerTensorAffine,
"Only per_tensor quantized quantized tensors are supported by index_select.")
AT_DISPATCH_QINT_TYPES(out.scalar_type(), "index_select_quant_cuda", [&] {
index_select_out_cuda_impl<scalar_t>(out, self, dim, index);
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half,
at::ScalarType::Bool,
at::ScalarType::BFloat16,
out.scalar_type(),
"index_select_cuda",
[&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); });
}
return out;
}
Tensor index_select_cuda(const Tensor& self, int64_t dim, const Tensor& index) {
Tensor out;
if (self.is_quantized()){
TORCH_CHECK(
self.qscheme() == kPerTensorAffine,
"Only per_tensor quantized quantized tensors are supported by index_select.")
out = at::empty_quantized({0}, self);
} else {
out = at::empty({0}, self.options());
}
at::native::index_select_out_cuda(self, dim, index, out);
return out;
}
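// Illustrative usage note (added; not part of the original source): index_select
// gathers whole slices along `dim`. For self = [[1, 2], [3, 4], [5, 6]] with
// dim = 0 and index = [2, 0], the result is [[5, 6], [1, 2]]; indices may repeat
// and their count may differ from self.size(dim).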
namespace {
template <typename mask_t>
void masked_fill_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool, kHalf, kBFloat16, iter.common_dtype(), "masked_fill_", [&]() {
const auto value_ = value.to<scalar_t>();
gpu_kernel(
iter, [value_] GPU_LAMBDA(scalar_t self, mask_t mask) -> scalar_t {
if (mask) {
return value_;
}
return self;
});
});
}
} // anonymous namespace
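// Illustrative semantics (added; not part of the original source): for
// self = [1, 2, 3], mask = [true, false, true] and value = 9, masked_fill_
// produces [9, 2, 9] -- the lambda above returns value_ where the mask is set
// and passes self through elsewhere.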
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Scalar& value) {
TORCH_CHECK(self.device() == mask.device(), "expected self and mask to be on the same device, but got mask on ",
mask.device(), " and self on ", self.device());
TORCH_CHECK(mask.scalar_type() == kByte || mask.scalar_type() == kBool,
"expected mask dtype to be Bool but got ", mask.scalar_type());
auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_");
if (at::has_internal_overlap(self) == MemOverlap::YES) {
TORCH_WARN(
"Use of masked_fill_ on expanded tensors is deprecated. "
"Please clone() the tensor before performing this operation. "
"This also applies to advanced indexing e.g. tensor[mask] = scalar");
}
at::assert_no_partial_overlap(self, mask);
c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_fill_");
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self)
.add_input(self)
.add_input(*b_mask)
.build();
if (b_mask->dtype() == at::ScalarType::Byte) {
TORCH_WARN("masked_fill_ received a mask with dtype torch.uint8, this behavior is now deprecated," \
"please use a mask with dtype torch.bool instead.");
masked_fill_kernel<uint8_t>(iter, value);
} else {
masked_fill_kernel<bool>(iter, value);
}
namedinference::propagate_names_if_nonempty(self, maybe_outnames);
return self;
}
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Tensor & value) {
TORCH_CHECK(value.dim() == 0, "masked_fill_ only supports a 0-dimensional value tensor, but got tensor "
"with ", value.dim(), " dimension(s).");
return masked_fill__cuda(self, mask, value.item());
}
} // native
} // at
|
a6231dbafc832d49deb888922396eb930a1e2706.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
__global__ void vec_fabsf (size_t n, float *result, float *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fabsf(x[id]);
}
} | a6231dbafc832d49deb888922396eb930a1e2706.cu | #include "includes.h"
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
__global__ void vec_fabsf (size_t n, float *result, float *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fabsf(x[id]);
}
} |
18fc26b206c39f0eae49ef632b115426444d2833.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "alexnet.h"
#include "cuda_helper.h"
void DataLoader::load_input(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx,
Runtime* runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
SampleIdxs* meta = (SampleIdxs*) task->local_args;
TensorAccessorR<float, 4> acc_full_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_batch_input(
regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/);
int batch_size = acc_batch_input.rect.hi[3] - acc_batch_input.rect.lo[3] + 1;
int channels = acc_batch_input.rect.hi[2] - acc_batch_input.rect.lo[2] + 1;
int height = acc_batch_input.rect.hi[1] - acc_batch_input.rect.lo[1] + 1;
int width = acc_batch_input.rect.hi[0] - acc_batch_input.rect.lo[0] + 1;
//FIXME: currently assume continuous indices
assert(batch_size == meta->num_samples);
for (int i = 1; i < batch_size; i++)
assert(meta->idxs[i] == meta->idxs[0] + i);
const float* input_zc = acc_full_input.ptr + meta->idxs[0] * channels * height * width;
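// Note (added for clarity): because the sample indices are asserted to be
// consecutive above, the whole batch is one contiguous block of
// batch_size * channels * height * width floats starting at sample idxs[0],
// so a single flat copy_kernel launch below is sufficient.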
hipLaunchKernelGGL(( copy_kernel), dim3(GET_BLOCKS(acc_batch_input.rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
acc_batch_input.ptr, input_zc, acc_batch_input.rect.volume());
checkCUDA(hipDeviceSynchronize());
}
void DataLoader::load_label(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx,
Runtime* runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
SampleIdxs* meta = (SampleIdxs*) task->local_args;
TensorAccessorR<int, 2> acc_full_label(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<int, 2> acc_batch_label(
regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/);
int batch_size = acc_batch_label.rect.hi[1] - acc_batch_label.rect.lo[1] + 1;
//FIXME: currently assume continuous indices
assert(batch_size == meta->num_samples);
for (int i = 1; i < batch_size; i++)
assert(meta->idxs[i] == meta->idxs[0] + i);
const int* input_zc = acc_full_label.ptr + meta->idxs[0];
hipLaunchKernelGGL(( copy_kernel), dim3(GET_BLOCKS(acc_batch_label.rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
acc_batch_label.ptr, input_zc, acc_batch_label.rect.volume());
checkCUDA(hipDeviceSynchronize());
}
| 18fc26b206c39f0eae49ef632b115426444d2833.cu | /* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "alexnet.h"
#include "cuda_helper.h"
void DataLoader::load_input(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx,
Runtime* runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
SampleIdxs* meta = (SampleIdxs*) task->local_args;
TensorAccessorR<float, 4> acc_full_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_batch_input(
regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/);
int batch_size = acc_batch_input.rect.hi[3] - acc_batch_input.rect.lo[3] + 1;
int channels = acc_batch_input.rect.hi[2] - acc_batch_input.rect.lo[2] + 1;
int height = acc_batch_input.rect.hi[1] - acc_batch_input.rect.lo[1] + 1;
int width = acc_batch_input.rect.hi[0] - acc_batch_input.rect.lo[0] + 1;
//FIXME: currently assume continuous indices
assert(batch_size == meta->num_samples);
for (int i = 1; i < batch_size; i++)
assert(meta->idxs[i] == meta->idxs[0] + i);
const float* input_zc = acc_full_input.ptr + meta->idxs[0] * channels * height * width;
copy_kernel<<<GET_BLOCKS(acc_batch_input.rect.volume()), CUDA_NUM_THREADS>>>(
acc_batch_input.ptr, input_zc, acc_batch_input.rect.volume());
checkCUDA(cudaDeviceSynchronize());
}
void DataLoader::load_label(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx,
Runtime* runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
SampleIdxs* meta = (SampleIdxs*) task->local_args;
TensorAccessorR<int, 2> acc_full_label(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<int, 2> acc_batch_label(
regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/);
int batch_size = acc_batch_label.rect.hi[1] - acc_batch_label.rect.lo[1] + 1;
//FIXME: currently assume continuous indices
assert(batch_size == meta->num_samples);
for (int i = 1; i < batch_size; i++)
assert(meta->idxs[i] == meta->idxs[0] + i);
const int* input_zc = acc_full_label.ptr + meta->idxs[0];
copy_kernel<<<GET_BLOCKS(acc_batch_label.rect.volume()), CUDA_NUM_THREADS>>>(
acc_batch_label.ptr, input_zc, acc_batch_label.rect.volume());
checkCUDA(cudaDeviceSynchronize());
}
|
6cc114005146d9e7bc44c671c25d4d44be049b10.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "addArrays.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
addArrays), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
addArrays), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
addArrays), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c);
}
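// Note (added for clarity): kernel launches are asynchronous, so without a
// device synchronization before reading the clock this loop mostly measures
// launch/queueing overhead; adding hipDeviceSynchronize() here would time the
// actual kernel execution instead.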
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 6cc114005146d9e7bc44c671c25d4d44be049b10.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "addArrays.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
addArrays<<<gridBlock,threadBlock>>>(a,b,c);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
addArrays<<<gridBlock,threadBlock>>>(a,b,c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
addArrays<<<gridBlock,threadBlock>>>(a,b,c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
1b8245859f23603b1f1eddf8f658a677bf2ec4e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
ID3D11Device* device;
struct CUSTOMVERTEX {
FLOAT x, y, z;
DWORD color;
};
ID3D11Buffer* positionsVB;
struct cudaGraphicsResource* positionsVB_CUDA;
int main()
{
// Get a CUDA-enabled adapter
IDXGIFactory* factory;
CreateDXGIFactory(__uuidof(IDXGIFactory), (void**)&factory);
IDXGIAdapter* adapter = 0;
for (unsigned int i = 0; !adapter; ++i) {
if (FAILED(factory->EnumAdapters(i, &adapter)))
break;
int dev;
if (hipD3D11GetDevice(&dev, adapter) == hipSuccess)
break;
adapter->Release();
}
factory->Release();
// Create swap chain and device
...
sFnPtr_D3D11CreateDeviceAndSwapChain(adapter,
D3D11_DRIVER_TYPE_HARDWARE,
0,
D3D11_CREATE_DEVICE_DEBUG,
featureLevels, 3,
D3D11_SDK_VERSION,
&swapChainDesc, &swapChain,
&device,
&featureLevel,
&deviceContext);
adapter->Release();
// Register device with CUDA
cudaD3D11SetDirect3DDevice(device);
// Create vertex buffer and register it with CUDA
unsigned int size = width * height * sizeof(CUSTOMVERTEX);
D3D11_BUFFER_DESC bufferDesc;
bufferDesc.Usage = D3D11_USAGE_DEFAULT;
bufferDesc.ByteWidth = size;
bufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
bufferDesc.CPUAccessFlags = 0;
bufferDesc.MiscFlags = 0;
device->CreateBuffer(&bufferDesc, 0, &positionsVB);
hipGraphicsD3D11RegisterResource(&positionsVB_CUDA,
positionsVB,
hipGraphicsRegisterFlagsNone);
hipGraphicsResourceSetMapFlags(positionsVB_CUDA,
hipGraphicsMapFlagsWriteDiscard);
// Launch rendering loop
while (...) {
...
Render();
...
}
...
}
void Render()
{
// Map vertex buffer for writing from CUDA
float4* positions;
hipGraphicsMapResources(1, &positionsVB_CUDA, 0);
size_t num_bytes;
hipGraphicsResourceGetMappedPointer((void**)&positions,
&num_bytes,
positionsVB_CUDA);
// Execute kernel
dim3 dimBlock(16, 16, 1);
dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);
hipLaunchKernelGGL(( createVertices), dim3(dimGrid), dim3(dimBlock), 0, 0, positions, time,
width, height);
// Unmap vertex buffer
hipGraphicsUnmapResources(1, &positionsVB_CUDA, 0);
// Draw and present
...
}
void releaseVB()
{
hipGraphicsUnregisterResource(positionsVB_CUDA);
positionsVB->Release();
}
__global__ void createVertices(float4* positions, float time,
unsigned int width, unsigned int height)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
// Calculate uv coordinates
float u = x / (float)width;
float v = y / (float)height;
u = u * 2.0f - 1.0f;
v = v * 2.0f - 1.0f;
// Calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u * freq + time)
* cosf(v * freq + time) * 0.5f;
// Write positions
positions[y * width + x] =
make_float4(u, w, v, __int_as_float(0xff00ff00));
}
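// Note (added for clarity; interpretation, not from the original source):
// 0xff00ff00 is a packed A8R8G8B8 color (opaque green); __int_as_float()
// bit-casts the DWORD so the bits survive in the float4's w component and can
// presumably be reinterpreted as the vertex color by the vertex layout.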
| 1b8245859f23603b1f1eddf8f658a677bf2ec4e2.cu | ID3D11Device* device;
struct CUSTOMVERTEX {
FLOAT x, y, z;
DWORD color;
};
ID3D11Buffer* positionsVB;
struct cudaGraphicsResource* positionsVB_CUDA;
int main()
{
// Get a CUDA-enabled adapter
IDXGIFactory* factory;
CreateDXGIFactory(__uuidof(IDXGIFactory), (void**)&factory);
IDXGIAdapter* adapter = 0;
for (unsigned int i = 0; !adapter; ++i) {
if (FAILED(factory->EnumAdapters(i, &adapter)))
break;
int dev;
if (cudaD3D11GetDevice(&dev, adapter) == cudaSuccess)
break;
adapter->Release();
}
factory->Release();
// Create swap chain and device
...
sFnPtr_D3D11CreateDeviceAndSwapChain(adapter,
D3D11_DRIVER_TYPE_HARDWARE,
0,
D3D11_CREATE_DEVICE_DEBUG,
featureLevels, 3,
D3D11_SDK_VERSION,
&swapChainDesc, &swapChain,
&device,
&featureLevel,
&deviceContext);
adapter->Release();
// Register device with CUDA
cudaD3D11SetDirect3DDevice(device);
// Create vertex buffer and register it with CUDA
unsigned int size = width * height * sizeof(CUSTOMVERTEX);
D3D11_BUFFER_DESC bufferDesc;
bufferDesc.Usage = D3D11_USAGE_DEFAULT;
bufferDesc.ByteWidth = size;
bufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
bufferDesc.CPUAccessFlags = 0;
bufferDesc.MiscFlags = 0;
device->CreateBuffer(&bufferDesc, 0, &positionsVB);
cudaGraphicsD3D11RegisterResource(&positionsVB_CUDA,
positionsVB,
cudaGraphicsRegisterFlagsNone);
cudaGraphicsResourceSetMapFlags(positionsVB_CUDA,
cudaGraphicsMapFlagsWriteDiscard);
// Launch rendering loop
while (...) {
...
Render();
...
}
...
}
void Render()
{
// Map vertex buffer for writing from CUDA
float4* positions;
cudaGraphicsMapResources(1, &positionsVB_CUDA, 0);
size_t num_bytes;
cudaGraphicsResourceGetMappedPointer((void**)&positions,
&num_bytes,
positionsVB_CUDA);
// Execute kernel
dim3 dimBlock(16, 16, 1);
dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);
createVertices<<<dimGrid, dimBlock>>>(positions, time,
width, height);
// Unmap vertex buffer
cudaGraphicsUnmapResources(1, &positionsVB_CUDA, 0);
// Draw and present
...
}
void releaseVB()
{
cudaGraphicsUnregisterResource(positionsVB_CUDA);
positionsVB->Release();
}
__global__ void createVertices(float4* positions, float time,
unsigned int width, unsigned int height)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
// Calculate uv coordinates
float u = x / (float)width;
float v = y / (float)height;
u = u * 2.0f - 1.0f;
v = v * 2.0f - 1.0f;
// Calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u * freq + time)
* cosf(v * freq + time) * 0.5f;
// Write positions
positions[y * width + x] =
make_float4(u, w, v, __int_as_float(0xff00ff00));
}
|
96c07a253b64d6fc0c791ee082500bd3ca323411.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/mod_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void ModOpSimpleKernel(const int N, const int64_t divisor_,
const T* data_ptr, T* output_ptr) {
CUDA_1D_KERNEL_LOOP(i, N) {
output_ptr[i] = data_ptr[i] % divisor_;
}
}
template <typename T>
__global__ void ModOpKernel(const int N, const int64_t divisor_,
const T* data_ptr, T* output_ptr) {
CUDA_1D_KERNEL_LOOP(i, N) {
output_ptr[i] = data_ptr[i] % divisor_;
if (output_ptr[i] && ((output_ptr[i] > 0) != (divisor_ > 0))) {
output_ptr[i] += divisor_;
}
}
}
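// Illustrative example (added; not part of the original source): with
// divisor_ = 3, plain C++ gives -7 % 3 == -1; the sign fix-up above adds the
// divisor, yielding 2, i.e. a Python-style modulo whose result follows the
// sign of the divisor.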
} // namespace
template <>
template <typename T>
bool ModOp<CUDAContext>::DoRunWithType() {
auto& data = Input(DATA);
auto N = data.numel();
const auto* data_ptr = data.template data<T>();
auto* output = Output(0, data.sizes(), at::dtype<T>());
auto* output_ptr = output->template mutable_data<T>();
if (sign_follow_divisor_) {
hipLaunchKernelGGL(( ModOpKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, divisor_, data_ptr, output_ptr);
} else {
hipLaunchKernelGGL(( ModOpSimpleKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, divisor_, data_ptr, output_ptr);
}
return true;
}
REGISTER_CUDA_OPERATOR(Mod, ModOp<CUDAContext>);
} // namespace caffe2
| 96c07a253b64d6fc0c791ee082500bd3ca323411.cu | #include "caffe2/operators/mod_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void ModOpSimpleKernel(const int N, const int64_t divisor_,
const T* data_ptr, T* output_ptr) {
CUDA_1D_KERNEL_LOOP(i, N) {
output_ptr[i] = data_ptr[i] % divisor_;
}
}
template <typename T>
__global__ void ModOpKernel(const int N, const int64_t divisor_,
const T* data_ptr, T* output_ptr) {
CUDA_1D_KERNEL_LOOP(i, N) {
output_ptr[i] = data_ptr[i] % divisor_;
if (output_ptr[i] && ((output_ptr[i] > 0) != (divisor_ > 0))) {
output_ptr[i] += divisor_;
}
}
}
} // namespace
template <>
template <typename T>
bool ModOp<CUDAContext>::DoRunWithType() {
auto& data = Input(DATA);
auto N = data.numel();
const auto* data_ptr = data.template data<T>();
auto* output = Output(0, data.sizes(), at::dtype<T>());
auto* output_ptr = output->template mutable_data<T>();
if (sign_follow_divisor_) {
ModOpKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, divisor_, data_ptr, output_ptr);
} else {
ModOpSimpleKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, divisor_, data_ptr, output_ptr);
}
return true;
}
REGISTER_CUDA_OPERATOR(Mod, ModOp<CUDAContext>);
} // namespace caffe2
|
7cd89c1da8b25cfd5a7fec75f7b38a3d62f7ffd4.hip | // !!! This is a file automatically generated by hipify!!!
#include "utilities.cuh"
#include <hip/hip_runtime.h>
/*! \file utilities.cu
defines kernel callers and kernels for some simple GPU array calculations
\addtogroup utilityKernels
@{
*/
template <typename T>
__global__ void gpu_add_gpuarray_kernel(T *a, T *b, int N)
{
// read in the particle that belongs to this thread
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N)
return;
a[idx] = a[idx]+b[idx];
return;
};
template<typename T>
bool gpu_add_gpuarray(GPUArray<T> &answer, GPUArray<T> &adder, int N, int maxBlockSize)
{
unsigned int block_size = maxBlockSize;
if (N < 128) block_size = 32;
unsigned int nblocks = (N)/block_size + 1;
ArrayHandle<T> a(answer,access_location::device,access_mode::readwrite);
ArrayHandle<T> b(adder,access_location::device,access_mode::read);
hipLaunchKernelGGL(( gpu_add_gpuarray_kernel), dim3(nblocks),dim3(block_size), 0, 0, a.data,b.data,N);
HANDLE_ERROR(hipGetLastError());
return hipSuccess;
}
/*!
A function of convenience... set an array on the device
*/
template <typename T>
__global__ void gpu_set_array_kernel(T *arr,T value, int N)
{
// read in the particle that belongs to this thread
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N)
return;
arr[idx] = value;
return;
};
template<typename T>
bool gpu_set_array(T *array, T value, int N,int maxBlockSize)
{
unsigned int block_size = maxBlockSize;
if (N < 128) block_size = 16;
unsigned int nblocks = N/block_size + 1;
hipLaunchKernelGGL(( gpu_set_array_kernel), dim3(nblocks), dim3(block_size), 0, 0, array,value,N);
HANDLE_ERROR(hipGetLastError());
return hipSuccess;
}
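// Usage sketch (added; illustrative only, relying on the double instantiation
// below): zero-fill a device buffer d_buf of N doubles with 128-thread blocks:
// gpu_set_array<double>(d_buf, 0.0, N, 128);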
template <typename T>
__global__ void gpu_copy_gpuarray_kernel(T *copyInto,T *copyFrom, int N)
{
// read in the particle that belongs to this thread
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N)
return;
copyInto[idx] = copyFrom[idx];
return;
};
template<typename T>
bool gpu_copy_gpuarray(GPUArray<T> ©Into,GPUArray<T> ©From,int numberOfElementsToCopy,int maxBlockSize)
{
int N = copyFrom.getNumElements();
if(numberOfElementsToCopy >0)
N = numberOfElementsToCopy;
if(copyInto.getNumElements() < N)
copyInto.resize(N);
unsigned int block_size = maxBlockSize;
if (N < 128) block_size = 32;
unsigned int nblocks = (N)/block_size + 1;
ArrayHandle<T> ci(copyInto,access_location::device,access_mode::overwrite);
ArrayHandle<T> cf(copyFrom,access_location::device,access_mode::read);
hipLaunchKernelGGL(( gpu_copy_gpuarray_kernel), dim3(nblocks),dim3(block_size), 0, 0, ci.data,cf.data,N);
HANDLE_ERROR(hipGetLastError());
return hipSuccess;
}
//Declare templates used...cuda is annoying sometimes
template bool gpu_copy_gpuarray<double>(GPUArray<double> ©Into,GPUArray<double> ©From,int n, int maxBlockSize);
template bool gpu_copy_gpuarray<double2>(GPUArray<double2> ©Into,GPUArray<double2> ©From,int n, int maxBlockSize);
template bool gpu_copy_gpuarray<int>(GPUArray<int> ©Into,GPUArray<int> ©From,int n, int maxBlockSize);
template bool gpu_copy_gpuarray<int3>(GPUArray<int3> ©Into,GPUArray<int3> ©From,int n, int maxBlockSize);
template bool gpu_set_array<int>(int *,int, int, int);
template bool gpu_set_array<unsigned int>(unsigned int *,unsigned int, int, int);
template bool gpu_set_array<int2>(int2 *,int2, int, int);
template bool gpu_set_array<int3>(int3 *,int3, int, int);
template bool gpu_set_array<double>(double *,double, int, int);
template bool gpu_set_array<double2>(double2 *,double2, int, int);
template bool gpu_add_gpuarray<double>(GPUArray<double> &answer, GPUArray<double> &adder, int N, int maxBlockSize);
template bool gpu_add_gpuarray<double2>(GPUArray<double2> &answer, GPUArray<double2> &adder, int N, int maxBlockSize);
/** @} */ //end of group declaration
| 7cd89c1da8b25cfd5a7fec75f7b38a3d62f7ffd4.cu | #include "utilities.cuh"
#include <cuda_runtime.h>
/*! \file utilities.cu
defines kernel callers and kernels for some simple GPU array calculations
\addtogroup utilityKernels
@{
*/
template <typename T>
__global__ void gpu_add_gpuarray_kernel(T *a, T *b, int N)
{
// read in the particle that belongs to this thread
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N)
return;
a[idx] = a[idx]+b[idx];
return;
};
template<typename T>
bool gpu_add_gpuarray(GPUArray<T> &answer, GPUArray<T> &adder, int N, int maxBlockSize)
{
unsigned int block_size = maxBlockSize;
if (N < 128) block_size = 32;
unsigned int nblocks = (N)/block_size + 1;
ArrayHandle<T> a(answer,access_location::device,access_mode::readwrite);
ArrayHandle<T> b(adder,access_location::device,access_mode::read);
gpu_add_gpuarray_kernel<<<nblocks,block_size>>>(a.data,b.data,N);
HANDLE_ERROR(cudaGetLastError());
return cudaSuccess;
}
/*!
A function of convenience... set an array on the device
*/
template <typename T>
__global__ void gpu_set_array_kernel(T *arr,T value, int N)
{
// read in the particle that belongs to this thread
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N)
return;
arr[idx] = value;
return;
};
template<typename T>
bool gpu_set_array(T *array, T value, int N,int maxBlockSize)
{
unsigned int block_size = maxBlockSize;
if (N < 128) block_size = 16;
unsigned int nblocks = N/block_size + 1;
gpu_set_array_kernel<<<nblocks, block_size>>>(array,value,N);
HANDLE_ERROR(cudaGetLastError());
return cudaSuccess;
}
template <typename T>
__global__ void gpu_copy_gpuarray_kernel(T *copyInto,T *copyFrom, int N)
{
// read in the particle that belongs to this thread
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N)
return;
copyInto[idx] = copyFrom[idx];
return;
};
template<typename T>
bool gpu_copy_gpuarray(GPUArray<T> ©Into,GPUArray<T> ©From,int numberOfElementsToCopy,int maxBlockSize)
{
int N = copyFrom.getNumElements();
if(numberOfElementsToCopy >0)
N = numberOfElementsToCopy;
if(copyInto.getNumElements() < N)
copyInto.resize(N);
unsigned int block_size = maxBlockSize;
if (N < 128) block_size = 32;
unsigned int nblocks = (N)/block_size + 1;
ArrayHandle<T> ci(copyInto,access_location::device,access_mode::overwrite);
ArrayHandle<T> cf(copyFrom,access_location::device,access_mode::read);
gpu_copy_gpuarray_kernel<<<nblocks,block_size>>>(ci.data,cf.data,N);
HANDLE_ERROR(cudaGetLastError());
return cudaSuccess;
}
//Declare templates used...cuda is annoying sometimes
template bool gpu_copy_gpuarray<double>(GPUArray<double> ©Into,GPUArray<double> ©From,int n, int maxBlockSize);
template bool gpu_copy_gpuarray<double2>(GPUArray<double2> ©Into,GPUArray<double2> ©From,int n, int maxBlockSize);
template bool gpu_copy_gpuarray<int>(GPUArray<int> ©Into,GPUArray<int> ©From,int n, int maxBlockSize);
template bool gpu_copy_gpuarray<int3>(GPUArray<int3> ©Into,GPUArray<int3> ©From,int n, int maxBlockSize);
template bool gpu_set_array<int>(int *,int, int, int);
template bool gpu_set_array<unsigned int>(unsigned int *,unsigned int, int, int);
template bool gpu_set_array<int2>(int2 *,int2, int, int);
template bool gpu_set_array<int3>(int3 *,int3, int, int);
template bool gpu_set_array<double>(double *,double, int, int);
template bool gpu_set_array<double2>(double2 *,double2, int, int);
template bool gpu_add_gpuarray<double>(GPUArray<double> &answer, GPUArray<double> &adder, int N, int maxBlockSize);
template bool gpu_add_gpuarray<double2>(GPUArray<double2> &answer, GPUArray<double2> &adder, int N, int maxBlockSize);
/** @} */ //end of group declaration
|
a346cf29c22725c0ef4e4d9d535892bcaeb4cbd0.hip | // !!! This is a file automatically generated by hipify!!!
// CUDA approach for solving the N-Queens problem (Pierce Burke and Zachary Bowditch)
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <hip/hip_runtime.h>
// include CUDA files
#include <hip/hip_runtime.h>
#include "inc/helper_functions.h"
#include "inc/helper_cuda.h"
#define N 8
int no_solutions; // increment every time a solution is found
int save_data(const char *filename, int *souls, double *times, int n){ //Helper function used to save the data as a file
FILE *f;
f = fopen(filename, "w");
for(int i = 0; i < n; i++){
fprintf(f, "%d:%f:%d\n", i, times[i], souls[i]);
}
fclose(f);
return 1;
}
int is_valid(int* sol, int n, int row, int col){ //function to check if a queen can be placed in a square without conflict
if(row >= n) return 0;
for(int c = 0; c <= col; c++){ //Use C++ in C :..)
if(sol[c] == row) return 0;
if(sol[c] - c == row - col) return 0;
if(n - sol[c] - c == n - row - col) return 0;
}
return 1;
}
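// Note (added for clarity): the three checks reject a shared row, a shared
// "\" diagonal (sol[c] - c == row - col) and a shared "/" diagonal, since
// n - sol[c] - c == n - row - col simplifies to sol[c] + c == row + col.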
long factorial(long num){ //calculate factorial, used for upperbound of partial solutions
if(num == 0) return 1;
return num*factorial(num - 1);
}
__device__ int d_is_valid(int* sol, int n, int row, int col){ //Device function for kernel to check if queen can be placed
if(row >= n) return 0;
for(int c = 0; c <= col; c++){ //Use C++ in C :..)
if(sol[c] == row) return 0;
if(sol[c] - c == row - col) return 0;
if(n - sol[c] - c == n - row - col) return 0;
}
return 1;
}
__global__ void solve(int *psols, int *souls, int n, int partials, int start_col){ //Kernel for solving partial solutions
/* psouls is an array of all partial solutions
souls is an array for the number of souls each partial solution yields
n is the size of the board/number of queens needed to be placed
partials is number of partial solutions generated
start col is how far the board is completed already
*/
int index = blockIdx.x*blockDim.x + threadIdx.x; //get thread ID
if(index >= partials) return; //limit which threads run
int row = 0, col = start_col;
int temp = 0; //solution counter
souls[index] = 0; //array of all number of solutions
int sol[N]; //array to store partial solution
for(int i = 0; i < n; i++) sol[i] = psols[index*n + i]; //copy partial solution into a separate array
while(1){
if(d_is_valid(sol, n, row, col)){ //check if queen can be placed
sol[col] = row; //place queen in that row/col
row = 0; //set row back to 0 and go to the next column
col++;
if(col == n){ //if its a solution then increment solution counter then continue
temp++;
col--; row = sol[col] + 1; //found a full board: step back right away so sol[n] is never indexed
}
} else {
row++; //if queen cant be placed in that row, try the next row
}
if(row >= n){ //if no more rows to try, backtrack
sol[col] = -1; //set current col back to empty
col--; //go to previous column
row = sol[col] + 1; //try the next row
}
if(col < start_col) break; //exit condition
}
souls[index] = temp; //store number of solution this thread found
__syncthreads();
}
int generate_partial_solutions(int* psouls, int depth, int n, int threads){ //Function used to generate partial solutions
/*psouls is the array to store partial solutions in
depth is how many columns you want to generate the partial solutions up to
n is length of board/number of queens
threads is unused variable from previous implementation
works in a similar way to the serial iterative solution, except the exit criteria is at the depth and not the last column
*/
// Start at first partial solution
int psi = 0;
int sol[n];
int row = 0, col = 0;
for(int i = 0; i < n; i++) sol[i] = -1; // initialize partial solution to -1
while(1){
if(is_valid(sol, n, row, col)){ //check if a queen can be placed
sol[col] = row; //if it can place it in that row/column
row = 0; //reset row
col++; //go to next column
if(col == depth){ //if it is a partial solution, store the current board configuration as a partial solution
for(int i = 0; i < n; i++)
psouls[psi*n + i] = sol[i];
psi++; //increment counter for number of solutions
row = n; //force an immediate backtrack so the search never goes deeper than depth columns
}
} else {
row++; //else try different row
}
if(row >= n){ //backtrack condition
sol[col--] = -1;
row = sol[col] + 1;
}
if(col == 0 && row >= n) break; //exit condition
}
return psi;
}
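// Note (added for clarity): partial boards are stored row-major, n ints per
// board; e.g. with n = 8 and depth = 3 the first three entries of a board hold
// the queen rows for columns 0-2 and the remaining five entries stay -1.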
int solve_partial_sols(int* psouls, int start_col, int n, int partials){ //serial version to solve partial solutions
/* psouls is array of partial solutions
start_col is the column where the partial solution ends
n is length of board/ number of queens required
partials is the number of partial solutions generated
*/
int solutions = 0; //solution counter
int row, col;
int sol[n] ;
for(int t = 0; t < partials; t++){ //go through each partial solution
//printf("T: %d\n", t);
row = 0;
col = start_col;
for(int k = 0; k < n; k++){ //copy the partial solution we are dealing with from psouls into sol
sol[k] = psouls[t*n + k] ;
// printf("%d ", sol[k]);
// printf("partial sol: %d \n", sol[k]) ;
}
//printf("\n");
while(1){
int valid = is_valid(sol, n, row, col); //check if queen can be placed
if(valid){
sol[col] = row; //if it can place queen
row = 0; //reset row
col++; //increment column to next column
if(col == n){ //if solution found
solutions++; //increment solution found
col--; row = sol[col] + 1; //found a solution: step back right away so sol[n] is never indexed
}
} else {
row++; //else try a different row
}
if(row >= n){ //backtrack condition, if no more rows to place queen
sol[col] = -1; //set current row/col to unexplored
col--; //go back to previous col
row = sol[col] + 1; //try next available row
}
if(col < start_col) break; //exit condition, backtracked to start
}
}
return solutions; //return total number of solutions found
}
int main(int argc, char *argv[]){
int *ps; //partial solutions array
int depth = atoi(argv[1]); //input how many columns should be explored in generating partial solutions
int n = N; //set size of board
int threads; //upperbound on threads required
long numerator = factorial((long)n);
long denominator = factorial((long)n - (long)depth);
threads = numerator / denominator; //calculate upper bound of threads required/partial solutions generated
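// Worked example (added for clarity): for n = 8 and depth = 3 this bound is
// 8! / 5! = 8 * 7 * 6 = 336 candidate prefixes; the number of valid partial
// boards actually generated is smaller, since attacked placements are pruned.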
ps = (int*) malloc(sizeof(int) * threads * n); //allocate upper bound of memory for number of partials generated
hipEvent_t start, end, solve_start; //timer, also times generating partial solutions
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
int partials = generate_partial_solutions(ps, depth, n, threads); //generate partial solutions in serial
/*
for(int i = 0; i < partials; i++){
for(int j = 0; j < n; j++)
printf("%d ", ps[i*n + j]);
printf("\n");
}
*/
// num_souls = solve_partial_sols(ps,depth,n, partials) ;
// printf("Main\n");
// printf("Number of souls: %d \n", num_souls) ;
/*
for(int i = 0; i < partials; i++){
for(int j = 0; j < n; j++)
printf("%d ", ps[i*n + j]);
printf("\n");
}
*/
int size = sizeof(int)*partials*n;
int block_threads = 1024;
int blocks = (partials + block_threads - 1) / block_threads; //calculate grid size with an integer ceiling so no partial board is left without a thread
if(blocks == 0) blocks++; //in case no partial boards were generated
int *d_ps; //partial solution array for device
hipEventCreate(&solve_start); //start timer for only the kernel time
hipEventRecord(solve_start, 0);
checkCudaErrors(hipMalloc((int**)&d_ps, size));
checkCudaErrors(hipMemcpy(d_ps, ps, size, hipMemcpyHostToDevice)); //malloc partial solution array for device
int *no_sols = (int*) malloc(sizeof(int) * partials); //malloc array of solutions.
int *d_no_sols;
checkCudaErrors(hipMalloc((int**)&d_no_sols, sizeof(int) * partials));
checkCudaErrors(hipMemcpy(d_no_sols, no_sols, sizeof(int) * partials, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( solve), dim3(blocks), dim3(block_threads) , 0, 0, d_ps, d_no_sols, n, partials, depth); //run kernel
hipEventRecord(end, 0); //just timer things
hipEventSynchronize(end);
float time = 0;
float solve_time = 0;
hipEventElapsedTime(&time, start, end);
hipEventElapsedTime(&solve_time, solve_start, end);
printf("Size: %d depth: %d ", n, depth);
printf("Partial boards: %d ", partials);
printf("Total_time: %.6f ", time/1000.0);
printf("Solve_time: %.6f\n", solve_time/1000.0);
checkCudaErrors(hipMemcpy(no_sols, d_no_sols, sizeof(int) * partials, hipMemcpyDeviceToHost)); //copy data back from device
//printf("Threads: %d\n", threads);
int no_solutions = 0;
for(int i = 0; i < partials; i++){
if(no_sols[i] > -1){
// printf("%d ", no_sols[i]);
no_solutions += no_sols[i]; //sum up number of solutions in serial
}
}
//Free arrays
checkCudaErrors(hipFree(d_ps));
free(ps);
return(0);
}
| a346cf29c22725c0ef4e4d9d535892bcaeb4cbd0.cu | // CUDA approach for solving the N-Queens problem (Pierce Burke and Zachary Bowditch)
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <cuda.h>
// include CUDA files
#include <cuda_runtime.h>
#include "inc/helper_functions.h"
#include "inc/helper_cuda.h"
#define N 8
int no_solutions; // increment every time a solution is found
int save_data(const char *filename, int *souls, double *times, int n){ //Helper function used to save the data as a file
FILE *f;
f = fopen(filename, "w");
for(int i = 0; i < n; i++){
fprintf(f, "%d:%f:%d\n", i, times[i], souls[i]);
}
fclose(f);
return 1;
}
int is_valid(int* sol, int n, int row, int col){ //function to check if a queen can be placed in a square without conflict
if(row >= n) return 0;
for(int c = 0; c <= col; c++){ //Use C++ in C :..)
if(sol[c] == row) return 0;
if(sol[c] - c == row - col) return 0;
if(n - sol[c] - c == n - row - col) return 0;
}
return 1;
}
long factorial(long num){ //calculate factorial, used for upperbound of partial solutions
if(num == 0) return 1;
return num*factorial(num - 1);
}
__device__ int d_is_valid(int* sol, int n, int row, int col){ //Device function for kernel to check if queen can be placed
if(row >= n) return 0;
for(int c = 0; c <= col; c++){ //Use C++ in C :..)
if(sol[c] == row) return 0;
if(sol[c] - c == row - col) return 0;
if(n - sol[c] - c == n - row - col) return 0;
}
return 1;
}
__global__ void solve(int *psols, int *souls, int n, int partials, int start_col){ //Kernel for solving partial solutions
/* psouls is an array of all partial solutions
souls is an array for the number of souls each partial solution yields
n is the size of the board/number of queens needed to be placed
partials is number of partial solutions generated
start col is how far the board is completed already
*/
int index = blockIdx.x*blockDim.x + threadIdx.x; //get thread ID
if(index >= partials) return; //limit which threads run
int row = 0, col = start_col;
int temp = 0; //solution counter
souls[index] = 0; //array of all number of solutions
int sol[N]; //array to store partial solution
for(int i = 0; i < n; i++) sol[i] = psols[index*n + i]; //copy partial solution into a separate array
while(1){
if(d_is_valid(sol, n, row, col)){ //check if queen can be placed
sol[col] = row; //place queen in that row/col
row = 0; //set row back to 0 and go to the next column
col++;
if(col == n){ //if its a solution then increment solution counter then continue
temp++;
col--; row = sol[col] + 1; //found a full board: step back right away so sol[n] is never indexed
}
} else {
row++; //if queen cant be placed in that row, try the next row
}
if(row >= n){ //if no more rows to try, backtrack
sol[col] = -1; //set current col back to empty
col--; //go to previous column
row = sol[col] + 1; //try the next row
}
if(col < start_col) break; //exit condition
}
souls[index] = temp; //store number of solution this thread found
__syncthreads();
}
int generate_partial_solutions(int* psouls, int depth, int n, int threads){ //Function used to generate partial solutions
/*psouls is the array to store partial solutions in
depth is how many columns you want to generate the partial solutions up to
n is length of board/number of queens
threads is an unused variable kept from a previous implementation
works in a similar way to the serial iterative solution, except the exit criterion is reaching the given depth rather than the last column
*/
// Start at first partial solution
int psi = 0;
int sol[n];
int row = 0, col = 0;
for(int i = 0; i < n; i++) sol[i] = -1; // initialize partial solution to -1
while(1){
if(is_valid(sol, n, row, col)){ //check if a queen can be placed
sol[col] = row; //if it can place it in that row/column
row = 0; //reset row
col++; //go to next column
if(col == depth){ //if it is a partial solution, store the current board configuration as a partial solution
for(int i = 0; i < n; i++)
psouls[psi*n + i] = sol[i];
psi++; //increment counter for number of solutions
row++; //continue
}
} else {
row++; //else try different row
}
if(row >= n){ //backtrack condition
sol[col--] = -1;
row = sol[col] + 1;
}
if(col == 0 && row >= n) break; //exit condition
}
return psi;
}
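/* Hedged helper sketch (added for illustration; not part of the original program --
main() keeps an equivalent loop commented out): dump the generated partial boards.
Encoding: psouls[i*n + c] holds the queen's row in column c of board i, or -1 if
column c has not been filled yet. */
void print_partial_solutions(int *psouls, int n, int partials){
	for(int i = 0; i < partials; i++){
		for(int c = 0; c < n; c++)
			printf("%d ", psouls[i*n + c]);
		printf("\n");
	}
}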
int solve_partial_sols(int* psouls, int start_col, int n, int partials){ //serial version to solve partial solutions
/* psouls is array of partial solutions
start_col is the column where the partial solution ends
n is length of board/ number of queens required
partials is the number of partial solutions generated
*/
int solutions = 0; //solution counter
int row, col;
int sol[n] ;
for(int t = 0; t < partials; t++){ //go through each partial solution
//printf("T: %d\n", t);
row = 0;
col = start_col;
for(int k = 0; k < n; k++){ //copy the partial solution we are dealing with from psouls into sol
sol[k] = psouls[t*n + k] ;
// printf("%d ", sol[k]);
// printf("partial sol: %d \n", sol[k]) ;
}
//printf("\n");
while(1){
int valid = is_valid(sol, n, row, col); //check if queen can be placed
if(valid){
sol[col] = row; //if it can place queen
row = 0; //reset row
col++; //increment column to next column
if(col == n){ //if solution found
solutions++; //increment solution found
row++; //continue
}
} else {
row++; //else try a different row
}
if(row >= n){ //backtrack condition, if no more rows to place queen
sol[col] = -1; //set current row/col to unexplored
col--; //go back to previous col
row = sol[col] + 1; //try the next available row
}
if(col < start_col) break; //exit condition, backtracked to start
}
}
return solutions; //return the total number of solutions found
}
int main(int argc, char *argv[]){
int *ps; //partial solutions array
int depth = atoi(argv[1]); //input how many columns should be explored in generating partial solutions
int n = N; //set size of board
int threads; //upper bound on threads required
long numerator = factorial((long)n);
long denominator = factorial((long)n - (long)depth);
threads = numerator / denominator; //calculate upper bound of threads required/partial solutions generated
ps = (int*) malloc(sizeof(int) * threads * n); //allocate upper bound of memory for number of partials generated
cudaEvent_t start, end, solve_start; //timer, also times generating partial solutions
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
int partials = generate_partial_solutions(ps, depth, n, threads); //generate partial solutions in serial
/*
for(int i = 0; i < partials; i++){
for(int j = 0; j < n; j++)
printf("%d ", ps[i*n + j]);
printf("\n");
}
*/
// num_souls = solve_partial_sols(ps,depth,n, partials) ;
// printf("Main\n");
// printf("Number of souls: %d \n", num_souls) ;
/*
for(int i = 0; i < partials; i++){
for(int j = 0; j < n; j++)
printf("%d ", ps[i*n + j]);
printf("\n");
}
*/
int size = sizeof(int)*partials*n;
int block_threads = 1024;
int blocks = (partials + block_threads - 1) / block_threads; //calculate grid size to pass to the kernel (integer round-up; ceil() on an integer division would never round up)
if(blocks == 0) blocks++; //guard against a zero-sized grid when no partial boards were generated
int *d_ps; //partial solution array for device
cudaEventCreate(&solve_start); //start timer for only the kernel time
cudaEventRecord(solve_start, 0);
checkCudaErrors(cudaMalloc((int**)&d_ps, size));
checkCudaErrors(cudaMemcpy(d_ps, ps, size, cudaMemcpyHostToDevice)); //malloc partial solution array for device
int *no_sols = (int*) malloc(sizeof(int) * partials); //malloc array of solutions.
int *d_no_sols;
checkCudaErrors(cudaMalloc((int**)&d_no_sols, sizeof(int) * partials));
checkCudaErrors(cudaMemcpy(d_no_sols, no_sols, sizeof(int) * partials, cudaMemcpyHostToDevice));
solve<<< blocks, block_threads >>>(d_ps, d_no_sols, n, partials, depth); //run kernel
cudaEventRecord(end, 0); //just timer things
cudaEventSynchronize(end);
float time = 0;
float solve_time = 0;
cudaEventElapsedTime(&time, start, end);
cudaEventElapsedTime(&solve_time, solve_start, end);
printf("Size: %d depth: %d ", n, depth);
printf("Partial boards: %d ", partials);
printf("Total_time: %.6f ", time/1000.0);
printf("Solve_time: %.6f\n", solve_time/1000.0);
checkCudaErrors(cudaMemcpy(no_sols, d_no_sols, sizeof(int) * partials, cudaMemcpyDeviceToHost)); //copy data back from device
//printf("Threads: %d\n", threads);
int no_solutions = 0;
for(int i = 0; i < partials; i++){
if(no_sols[i] > -1){
// printf("%d ", no_sols[i]);
no_solutions += no_sols[i]; //sum up number of solutions in serial
}
}
printf("Solutions: %d\n", no_solutions); //report the total found across all partial boards (the sum was otherwise unused)
//Free arrays
checkCudaErrors(cudaFree(d_ps));
checkCudaErrors(cudaFree(d_no_sols));
free(ps);
free(no_sols);
return(0);
}
|
26e515ee88e7eff1d37c77541e804074da9039b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-----------------------------------------------------------------------------
..Name: GPU_KNN_MED.cu
Desc: This file contains the KNN kd-tree GPU kernel
by Shawn Brown ([email protected])
-----------------------------------------------------------------------------*/
#ifndef _GPU_KNN_2D_MED_H_
#define _GPU_KNN_2D_MED_H_
/*---------------------------------------------------------
Includes
---------------------------------------------------------*/
#include <stdio.h>
//#include <float.h>
#include "GPUTREE_API.h"
/*---------------------------------------------------------
Function Definitions
---------------------------------------------------------*/
/*---------------------------------------------------------
Name: GPU_KNN_2D_MED
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes: 'S' is stored and implemented via a
static balanced cyclical kd-tree.
---------------------------------------------------------*/
__global__ void
GPU_KNN_2D_MED
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
float2 * qps, // IN: query points to compute k nearest neighbors for...
GPUNode_2D_MED * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from Indices)
int rootIdx, // IN: index of root node in KD Tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_2D_MED currNodes[KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[KNN_STACK_SIZE][KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ float2 queryPoints[KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currAxis, currInOut, nextAxis;
unsigned int stackTop, maxHeap, countHeap;
float dx, dy, diff, diff2, diffDist2;
float queryValue, splitValue;
float dist2Heap, bestDist2;
int tidx, width, currRow, currCol, qidx;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point
// read from slower RAM into faster shared memory
queryPoints[tidx] = qps[qidx];
// Compute number of elements (in grid)
int height = gridDim.y * blockDim.y;
int nElems = height * width;
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = (rootIdx & 0x1FFFFFFF); // | ((currAxis << 29) & 0x60000000); // | ((currInOut << 31) & 0x8000000);;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & 0x1FFFFFFFU);
currAxis = (searchStack[stackTop][tidx].nodeFlags & 0x60000000U) >> 29;
currInOut = (searchStack[stackTop][tidx].nodeFlags & 0x80000000U) >> 31;
nextAxis = ((currAxis == 0) ? 1 : 0);
// Early Exit Check
if (currInOut == 1) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
// Next Line is effectively queryValue = queryPoints[prevAxis];
queryValue = ((currAxis == 0) ? queryPoints[tidx].y : queryPoints[tidx].x);
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's Much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// read from slower RAM into faster shared memory
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = ((currAxis == 0) ? queryPoints[tidx].x : queryPoints[tidx].y);
splitValue = ((currAxis == 0) ? currNodes[tidx].pos[0] : currNodes[tidx].pos[1]);
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryPoints[tidx].x;
dy = currNodes[tidx].pos[1] - queryPoints[tidx].y;
diffDist2 = (dx*dx) + (dy*dy);
// See if we should add this point to the 'k' NN Heap
if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
// Compare Parent to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
// Compare current index to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// update bestDist2
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
//nextIdx = currNodes[tidx].Right;
if (0xFFFFFFFF != currNodes[tidx].Right) // cInvalid
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Right & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U) | 0x80000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
//nextIdx = currNodes[tidx].Left;
if (0xFFFFFFFF != currNodes[tidx].Left)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Left & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U); // | 0x80000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
//nextIdx = currNodes[tidx].Left;
if (0xFFFFFFFFU != currNodes[tidx].Left)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Left & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U) | 0x80000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
//nextIdx = currNodes[tidx].Right;
if (0xFFFFFFFFU != currNodes[tidx].Right)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Right & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U); // | 0x8000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
//
// Output Results
//
// We now have a heap of the 'k' nearest neighbors
// Write them to the results array
// Assume answers should be stored along z axis of 3 dimensional cube
for (unsigned int i = 0; i < countHeap; i++)
{
unsigned int i1 = i+1;
unsigned int offset = i * nElems;
// Convert Nearest Neighbor Info to final format
// read from slower RAM memory into faster shared memory
knnHeap[i1][tidx].Id = ids[knnHeap[i1][tidx].Id]; // Really need IDs, not indices
knnHeap[i1][tidx].Dist = sqrtf( knnHeap[i1][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// write from faster shared memory into slower RAM memory
qrs[qidx+offset] = knnHeap[i1][tidx];
}
}
#endif // _GPU_KNN_2D_MED_H_
| 26e515ee88e7eff1d37c77541e804074da9039b0.cu | /*-----------------------------------------------------------------------------
..Name: GPU_KNN_MED.cu
Desc: This file contains the KNN kd-tree GPU kernel
by Shawn Brown ([email protected])
-----------------------------------------------------------------------------*/
#ifndef _GPU_KNN_2D_MED_H_
#define _GPU_KNN_2D_MED_H_
/*---------------------------------------------------------
Includes
---------------------------------------------------------*/
#include <stdio.h>
//#include <float.h>
#include "GPUTREE_API.h"
/*---------------------------------------------------------
Function Definitions
---------------------------------------------------------*/
/*---------------------------------------------------------
Name: GPU_KNN_2D_MED
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes: 'S' is stored and implemented via a
static balanced cyclical kd-tree.
---------------------------------------------------------*/
__global__ void
GPU_KNN_2D_MED
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
float2 * qps, // IN: query points to compute k nearest neighbors for...
GPUNode_2D_MED * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from Indices)
int rootIdx, // IN: index of root node in KD Tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_2D_MED currNodes[KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[KNN_STACK_SIZE][KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ float2 queryPoints[KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currAxis, currInOut, nextAxis;
unsigned int stackTop, maxHeap, countHeap;
float dx, dy, diff, diff2, diffDist2;
float queryValue, splitValue;
float dist2Heap, bestDist2;
int tidx, width, currRow, currCol, qidx;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point
// read from slower RAM into faster shared memory
queryPoints[tidx] = qps[qidx];
// Compute number of elements (in grid)
int height = gridDim.y * blockDim.y;
int nElems = height * width;
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = (rootIdx & 0x1FFFFFFF); // | ((currAxis << 29) & 0x60000000); // | ((currInOut << 31) & 0x8000000);;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & 0x1FFFFFFFU);
currAxis = (searchStack[stackTop][tidx].nodeFlags & 0x60000000U) >> 29;
currInOut = (searchStack[stackTop][tidx].nodeFlags & 0x80000000U) >> 31;
nextAxis = ((currAxis == 0) ? 1 : 0);
// Early Exit Check
if (currInOut == 1) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
// Next Line is effectively queryValue = queryPoints[prevAxis];
queryValue = ((currAxis == 0) ? queryPoints[tidx].y : queryPoints[tidx].x);
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's Much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// read from slower RAM into faster shared memory
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = ((currAxis == 0) ? queryPoints[tidx].x : queryPoints[tidx].y);
splitValue = ((currAxis == 0) ? currNodes[tidx].pos[0] : currNodes[tidx].pos[1]);
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryPoints[tidx].x;
dy = currNodes[tidx].pos[1] - queryPoints[tidx].y;
diffDist2 = (dx*dx) + (dy*dy);
// See if we should add this point to the 'k' NN Heap
if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
// Compare Parent to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
// Compare current index to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// update bestDist2
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
//nextIdx = currNodes[tidx].Right;
if (0xFFFFFFFF != currNodes[tidx].Right) // cInvalid
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Right & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U) | 0x80000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
//nextIdx = currNodes[tidx].Left;
if (0xFFFFFFFF != currNodes[tidx].Left)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Left & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U); // | 0x80000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
//nextIdx = currNodes[tidx].Left;
if (0xFFFFFFFFU != currNodes[tidx].Left)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Left & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U) | 0x80000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
//nextIdx = currNodes[tidx].Right;
if (0xFFFFFFFFU != currNodes[tidx].Right)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Right & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U); // | 0x8000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
//
// Output Results
//
// We now have a heap of the 'k' nearest neighbors
// Write them to the results array
// Assume answers should be stored along z axis of 3 dimensional cube
for (unsigned int i = 0; i < countHeap; i++)
{
unsigned int i1 = i+1;
unsigned int offset = i * nElems;
// Convert Nearest Neighbor Info to final format
// read from slower RAM memory into faster shared memory
knnHeap[i1][tidx].Id = ids[knnHeap[i1][tidx].Id]; // Really need IDs, not indices
knnHeap[i1][tidx].Dist = sqrtf( knnHeap[i1][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// write from faster shared memory into slower RAM memory
qrs[qidx+offset] = knnHeap[i1][tidx];
}
}
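/* Hedged helper sketch (added for clarity; the kernel above builds these words inline
and does not call this function): the search-stack 'nodeFlags' field packs the node
index into bits 0-28, the split axis into bits 29-30, and the parent-side (in/out)
flag into bit 31. */
__device__ __forceinline__
unsigned int PackSearchNodeFlags(unsigned int nodeIdx, unsigned int axis, unsigned int onOutside)
{
	return (nodeIdx & 0x1FFFFFFFU) | ((axis << 29) & 0x60000000U) | ((onOutside << 31) & 0x80000000U);
}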
#endif // _GPU_KNN_2D_MED_H_
|
2c97ecb4e6aceccf888a4eb51b6c1d77113acdd8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <handheld_object_registration/projected_correspondences.h>
__device__ struct cuRect{
int x;
int y;
int width;
int height;
// cuRect(int i = 0, int j = 0, int w = 0, int h = 0) :
// x(i), y(j), width(w), height(h) {}
};
#define CUDA_ERROR_CHECK(process) { \
cudaAssert((process), __FILE__, __LINE__); \
}
void cudaAssert(hipError_t code, const char *file, int line, bool abort = true) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n",
hipGetErrorString(code), file, line);
if (abort) {
exit(code);
}
}
}
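/* Hedged usage sketch (added for illustration; the names below are examples only, not
part of the original code): wrap HIP runtime calls in CUDA_ERROR_CHECK so that any
failure is reported with its file and line. */
static inline void exampleCheckedUpload(void **d_buf, const void *h_buf, size_t nbytes) {
	CUDA_ERROR_CHECK(hipMalloc(d_buf, nbytes));
	CUDA_ERROR_CHECK(hipMemcpy(*d_buf, h_buf, nbytes, hipMemcpyHostToDevice));
}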
__host__ __device__ __align__(16)
int cuDivUp(
int a, int b) {
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
__device__ __forceinline__
void cuConditionROI(cuRect *rect, int width, int height) {
if (rect->x < 0) {
rect->x = 0;
}
if (rect->y < 0) {
rect->y = 0;
}
if ((rect->width + rect->x) > width) {
rect->x -= ((rect->width + rect->x) - width);
}
if ((rect->height + rect->y) > height) {
rect->y -= ((rect->height + rect->y) - height);
}
}
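/* Worked example (added for clarity): a 31x31 rect placed at (-10, -8) -- as happens
when a search window is centred near the image corner -- is shifted by the clamps
above to (0, 0) while keeping its 31x31 size inside a 640x480 image. */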
__device__ __forceinline__
float cuEuclideanDistance(float *src_pt, float *model_pt,
const int lenght, bool sqrt_dist = true) {
float sum = 0.0f;
for (int i = 0; i < lenght; i++) {
sum += ((src_pt[i] - model_pt[i]) * (src_pt[i] - model_pt[i]));
}
if (sqrt_dist) {
return sqrtf(sum);
} else {
return sum;
}
}
__global__ __forceinline__
void findCorrespondencesGPU(Correspondence * correspondences,
cuMat<float, NUMBER_OF_ELEMENTS> *d_src_points,
int *d_src_indices,
cuMat<float, NUMBER_OF_ELEMENTS> *d_model_points,
int *d_model_indices, const int im_width,
const int im_height, const int wsize) {
int t_idx = threadIdx.x + blockIdx.x * blockDim.x;
int t_idy = threadIdx.y + blockIdx.y * blockDim.y;
int offset = t_idx + t_idy * blockDim.x * gridDim.x;
//! temp
/*
if (offset == 0) {
for (int i = 0; i < im_width * im_height; i++) {
correspondences[i].query_index = -1;
correspondences[i].match_index = -1;
}
}
__syncthreads();
*/
if (offset < im_width * im_height) {
int model_index = d_model_indices[offset];
if (model_index != -1) {
cuRect rect;
rect.x = t_idx - wsize/2;
rect.y = t_idy - wsize/2;
rect.width = wsize;
rect.height = wsize;
cuConditionROI(&rect, im_width, im_height);
#ifdef _DEBUG
if (model_index == 10) {
printf("\n\nRECT: %d, %d, %d, %d\n",
rect.x, rect.y, rect.width, rect.height);
}
#endif
float model_pt[3];
model_pt[0] = d_model_points[model_index].data[0];
model_pt[1] = d_model_points[model_index].data[1];
model_pt[2] = d_model_points[model_index].data[2];
float min_dsm = FLT_MAX;
int min_ism = -1;
for (int l = rect.y; l < rect.y + rect.height; l++) {
for (int k = rect.x; k < rect.x + rect.width; k++) {
int src_index = d_src_indices[k + (l * im_width)];
if (src_index != -1) {
float src_pt[3];
src_pt[0] = d_src_points[src_index].data[0];
src_pt[1] = d_src_points[src_index].data[1];
src_pt[2] = d_src_points[src_index].data[2];
float dist = cuEuclideanDistance(src_pt, model_pt, 3);
if (dist >= 0.0f && dist < min_dsm && !isnan(dist)
&& dist < DISTANCE_THRESH) {
min_dsm = dist;
min_ism = src_index;
}
}
}
}
if (min_ism != -1 && min_dsm < DISTANCE_THRESH) {
correspondences[model_index].query_index = model_index;
correspondences[model_index].match_index = min_ism;
correspondences[model_index].distance = min_dsm;
} else {
correspondences[model_index].query_index = -1;
correspondences[model_index].match_index = -1;
}
}
}
}
// cuMat<float, NUMBER_OF_ELEMENTS> *src_points;
// cuMat<float, NUMBER_OF_ELEMENTS> *d_src_points;
// int *d_src_indices;
bool allocateCopyDataToGPU(
pcl::Correspondences &corr, float &energy,
bool allocate_src,
const pcl::PointCloud<PointTYPE>::Ptr source_points,
const ProjectionMap &src_projection,
const pcl::PointCloud<PointTYPE>::Ptr target_points,
const ProjectionMap &target_projection) {
if (source_points->empty() || target_points->empty()) {
printf("\033[31m EMPTY POINTCLOUD FOR CORRESPONDENCES \033[0m\n");
return false;
}
int *d_src_indices;
int *d_model_indices;
cuMat<float, NUMBER_OF_ELEMENTS> *d_model_points;
cuMat<float, NUMBER_OF_ELEMENTS> *d_src_points;
const int TGT_SIZE = ::max(
static_cast<int>(target_points->size()),
target_projection.width * target_projection.height);
cuMat<float, NUMBER_OF_ELEMENTS> model_points[TGT_SIZE];
const int SRC_POINT_SIZE = static_cast<int>(source_points->size());
// cuMat<float, NUMBER_OF_ELEMENTS> src_points[SRC_POINT_SIZE];
cuMat<float, NUMBER_OF_ELEMENTS> *src_points;
if (allocate_src) {
src_points = reinterpret_cast<cuMat<float, NUMBER_OF_ELEMENTS>* >(
std::malloc(sizeof(cuMat<float, NUMBER_OF_ELEMENTS>) * SRC_POINT_SIZE));
}
const int SRC_SIZE = IMAGE_WIDTH * IMAGE_HEIGHT;
int src_indices[SRC_SIZE];
int model_indices[SRC_SIZE];
int image_size = IMAGE_WIDTH * IMAGE_HEIGHT;
Correspondence *correspondences = reinterpret_cast<Correspondence*>(
std::malloc(sizeof(Correspondence) * image_size));
// #ifdef _OPENMP
// #pragma omp parallel for num_threads(8) collapse(2)
// #endif
for (int j = 0; j < target_projection.indices.rows; j++) {
for (int i = 0; i < target_projection.indices.cols; i++) {
int idx = i + (j * src_projection.indices.cols);
correspondences[idx].query_index = -1;
correspondences[idx].match_index = -1;
correspondences[idx].distance = FLT_MAX;
int index = target_projection.indices.at<int>(j, i);
if (index != -1) {
float x = target_points->points[index].x;
float y = target_points->points[index].y;
float z = target_points->points[index].z;
if (!isnan(x) && !isnan(y) && !isnan(z)) {
model_points[index].data[0] = x;
model_points[index].data[1] = y;
model_points[index].data[2] = z;
} else {
index = -1;
}
}
model_indices[idx] = index;
if (allocate_src) {
index = -1;
index = src_projection.indices.at<int>(j, i);
if (index != -1) {
float x = source_points->points[index].x;
float y = source_points->points[index].y;
float z = source_points->points[index].z;
if (!isnan(x) && !isnan(y) && !isnan(z)) {
src_points[index].data[0] = x;
src_points[index].data[1] = y;
src_points[index].data[2] = z;
} else {
index = -1;
}
}
src_indices[idx] = index;
}
}
}
int TMP_SIZE = TGT_SIZE * sizeof(cuMat<float, 3>);
hipMalloc(reinterpret_cast<void**>(&d_model_points), TMP_SIZE);
hipMemcpy(d_model_points, model_points, TMP_SIZE, hipMemcpyHostToDevice);
int TIP_SIZE = SRC_SIZE * sizeof(int);
hipMalloc(reinterpret_cast<void**>(&d_model_indices), TIP_SIZE);
hipMemcpy(d_model_indices, model_indices, TIP_SIZE,
hipMemcpyHostToDevice);
if (allocate_src) {
int SMP_SIZE = SRC_POINT_SIZE * sizeof(cuMat<float, 3>);
hipMalloc(reinterpret_cast<void**>(&d_src_points), SMP_SIZE);
hipMemcpy(d_src_points, src_points, SMP_SIZE, hipMemcpyHostToDevice);
int SIP_SIZE = SRC_SIZE * sizeof(int);
hipMalloc(reinterpret_cast<void**>(&d_src_indices), SIP_SIZE);
hipMemcpy(d_src_indices, src_indices, SIP_SIZE,
hipMemcpyHostToDevice);
}
Correspondence *d_correspondences;
hipMalloc(reinterpret_cast<void**>(&d_correspondences),
sizeof(Correspondence) * image_size);
hipMemcpy(d_correspondences, correspondences,
sizeof(Correspondence) * image_size, hipMemcpyHostToDevice);
dim3 block_size(cuDivUp(IMAGE_WIDTH, GRID_SIZE),
cuDivUp(IMAGE_HEIGHT, GRID_SIZE));
dim3 grid_size(GRID_SIZE, GRID_SIZE);
hipLaunchKernelGGL(( findCorrespondencesGPU), dim3(block_size), dim3(grid_size), 0, 0,
d_correspondences, d_src_points, d_src_indices, d_model_points,
d_model_indices, IMAGE_WIDTH, IMAGE_HEIGHT, target_projection.height);
// Correspondence *correspondences = reinterpret_cast<Correspondence*>(
// std::malloc(sizeof(Correspondence) * image_size));
hipMemcpy(correspondences, d_correspondences,
sizeof(Correspondence) * image_size, hipMemcpyDeviceToHost);
// const float max_value = 15.0f;
// const float min_value = -max_value;
energy = 0.0f;
int match_counter = 0;
for (int i = 0; i < image_size; i++) {
if ((correspondences[i].query_index > -1 &&
correspondences[i].query_index < image_size) &&
(correspondences[i].match_index > -1 &&
correspondences[i].match_index < image_size)) {
int model_index = correspondences[i].query_index;
int src_index = correspondences[i].match_index;
PointTYPE model_pt = target_points->points[model_index];
PointTYPE src_pt = source_points->points[src_index];
if (!isnan(model_pt.x) && !isnan(model_pt.y) && !isnan(model_pt.z) &&
!isnan(src_pt.x) && !isnan(src_pt.y) && !isnan(src_pt.z)) {
pcl::Correspondence c;
c.index_query = correspondences[i].query_index;
c.index_match = correspondences[i].match_index;
corr.push_back(c);
energy += correspondences[i].distance;
match_counter++;
}
}
}
energy /= static_cast<float>(match_counter);
energy = (match_counter == 0) ? -1.0f : energy;
free(src_points);
hipFree(d_src_indices);
hipFree(d_src_points);
free(correspondences);
hipFree(d_correspondences);
hipFree(d_model_points);
hipFree(d_model_indices);
return true;
}
/**
* DEBUG
*/
__global__ __forceinline__
void estimateCorrespondencesKernel(Correspondence *correspondences,
float *d_model_points,
int *d_target_indices,
float *d_src_points,
int *d_src_indices,
const int image_size,
const int step_size,
const int wsize) {
int t_idx = threadIdx.x + blockIdx.x * blockDim.x;
int t_idy = threadIdx.y + blockIdx.y * blockDim.y;
int offset = t_idx + t_idy * blockDim.x * gridDim.x;
if (offset < image_size) {
/* for points increment by 12 and 1 for indices */
int model_index = d_target_indices[offset];
#ifdef _DEBUG
if (model_index != -1) {
printf("%d, %d %d \n", model_index, t_idx, t_idy);
}
#endif
if (model_index != -1) {
cuRect rect;
rect.x = t_idx - wsize/2;
rect.y = t_idy - wsize/2;
rect.width = wsize;
rect.height = wsize;
cuConditionROI(&rect, IMAGE_WIDTH, IMAGE_HEIGHT);
int point_index = model_index * POINT_ELEMENTS;
float model_pt[3];
model_pt[0] = d_model_points[point_index];
model_pt[1] = d_model_points[point_index + 1];
model_pt[2] = d_model_points[point_index + 2];
#ifdef _DEBUG
printf("INDEX: %d --- %3.4f, %3.4f, %3.4f\n",
model_index, model_pt[0], model_pt[1], model_pt[2]);
#endif
float min_dsm = FLT_MAX;
int min_ism = -1;
for (int l = rect.y; l < rect.y + rect.height; l++) {
for (int k = rect.x; k < rect.x + rect.width; k++) {
int src_index = d_src_indices[k + (l * IMAGE_WIDTH)];
if (src_index != -1) {
int spoint_index = src_index * POINT_ELEMENTS;
float src_pt[3];
src_pt[0] = d_src_points[spoint_index];
src_pt[1] = d_src_points[spoint_index + 1];
src_pt[2] = d_src_points[spoint_index + 2];
// src_pt[0] = d_src_points[src_index].data[0];
// src_pt[1] = d_src_points[src_index].data[1];
// src_pt[2] = d_src_points[src_index].data[2];
float dist = cuEuclideanDistance(src_pt, model_pt, 3, false);
if (dist >= 0.0f && dist < min_dsm && !isnan(dist)) {
min_dsm = dist;
min_ism = src_index;
}
#ifdef _DEBUG
if (model_index == 10) {
printf("SRC INDEX: %d --- %3.4f, %3.4f, %3.4f ---- %3.4f\n",
src_index, src_pt[0], src_pt[1], src_pt[2], dist);
printf("MATCH INDEX: %d --- %3.4f\n",
min_ism, min_dsm);
}
#endif
}
}
}
if (min_ism != -1 && min_dsm < DISTANCE_THRESH) {
correspondences[model_index].query_index = model_index;
correspondences[model_index].match_index = min_ism;
correspondences[model_index].distance = min_dsm;
} else {
correspondences[model_index].query_index = -1;
correspondences[model_index].match_index = -1;
}
}
}
}
__global__ __forceinline__
void test_kernel(int *data, int image_size) {
int t_idx = threadIdx.x + blockIdx.x * blockDim.x;
int t_idy = threadIdx.y + blockIdx.y * blockDim.y;
int offset = t_idx + t_idy * blockDim.x * gridDim.x;
if (offset < image_size) {
int index = data[offset];
if (index != -1) {
printf("%d, %d %d \n", index, t_idx, t_idy);
}
}
}
bool allocateCopyDataToGPU2(
pcl::Correspondences &corr, float &energy,
bool allocate_src,
const pcl::PointCloud<PointTYPE>::Ptr source_points,
const ProjectionMap &src_projection,
const pcl::PointCloud<PointTYPE>::Ptr target_points,
const ProjectionMap &target_projection) {
if (source_points->empty() || target_points->empty()) {
printf("\033[31m EMPTY POINTCLOUD FOR CORRESPONDENCES \033[0m\n");
return false;
}
const int IN_TSIZE = static_cast<int>(target_points->size());
const int IMG_SIZE = target_projection.indices.cols *
target_projection.indices.rows;
dim3 block_size(cuDivUp(IMAGE_WIDTH, GRID_SIZE),
cuDivUp(IMAGE_HEIGHT, GRID_SIZE));
dim3 grid_size(GRID_SIZE, GRID_SIZE);
//! copy model data
float *d_target_points;
hipMalloc(reinterpret_cast<void**>(&d_target_points),
IN_TSIZE * sizeof(float) * POINT_ELEMENTS);
hipMemcpy(d_target_points, target_points->points.data(),
IN_TSIZE * sizeof(float) * POINT_ELEMENTS,
hipMemcpyHostToDevice);
const int TARGET_BYTE = target_projection.indices.step *
target_projection.indices.rows;
int *d_target_indices;
hipMalloc(reinterpret_cast<void**>(&d_target_indices), TARGET_BYTE);
// int *tdata = reinterpret_cast<int*>(target_projection.indices.data);
hipMemcpy(d_target_indices,
reinterpret_cast<int*>(target_projection.indices.data),
TARGET_BYTE, hipMemcpyHostToDevice);
float *d_src_points;
int *d_src_indices;
if (allocate_src) {
// hipFree(d_src_points);
// hipFree(d_src_indices);
const int IN_SSIZE = static_cast<int>(source_points->size());
hipMalloc(reinterpret_cast<void**>(&d_src_points),
IN_SSIZE * sizeof(float) * POINT_ELEMENTS);
hipMemcpy(d_src_points, source_points->points.data(),
IN_SSIZE * sizeof(float) * POINT_ELEMENTS,
hipMemcpyHostToDevice);
const int SRC_BYTE = src_projection.indices.step *
src_projection.indices.rows;
hipMalloc(reinterpret_cast<void**>(&d_src_indices), SRC_BYTE);
hipMemcpy(d_src_indices, reinterpret_cast<int*>(
src_projection.indices.data), SRC_BYTE,
hipMemcpyHostToDevice);
}
Correspondence *d_correspondences;
hipMalloc(reinterpret_cast<void**>(&d_correspondences),
sizeof(Correspondence) * IN_TSIZE);
hipLaunchKernelGGL(( estimateCorrespondencesKernel), dim3(block_size), dim3(grid_size), 0, 0,
d_correspondences, d_target_points, d_target_indices,
d_src_points, d_src_indices, IMG_SIZE,
target_projection.indices.step, target_projection.height);
// for (int i = 0; i < target_points->size(); i++) {
// std::cout << target_points->points[i] << "\n";
// }
// return -1;
Correspondence *correspondences = reinterpret_cast<Correspondence*>(
std::malloc(sizeof(Correspondence) * IN_TSIZE));
hipMemcpy(correspondences, d_correspondences,
sizeof(Correspondence) * IN_TSIZE, hipMemcpyDeviceToHost);
energy = 0.0f;
int match_counter = 0;
for (int i = 0; i < IN_TSIZE; i++) {
if ((correspondences[i].query_index > -1 &&
correspondences[i].query_index < IMG_SIZE) &&
(correspondences[i].match_index > -1 &&
correspondences[i].match_index < IMG_SIZE)) {
int model_index = correspondences[i].query_index;
int src_index = correspondences[i].match_index;
PointTYPE model_pt = target_points->points[model_index];
PointTYPE src_pt = source_points->points[src_index];
if (!isnan(model_pt.x) && !isnan(model_pt.y) && !isnan(model_pt.z) &&
!isnan(src_pt.x) && !isnan(src_pt.y) && !isnan(src_pt.z)) {
pcl::Correspondence c;
c.index_query = correspondences[i].query_index;
c.index_match = correspondences[i].match_index;
corr.push_back(c);
#ifdef _DEBUG
std::cout << correspondences[i].query_index << ", ";
std::cout << correspondences[i].match_index << ", ";
std::cout << correspondences[i].distance << "\n";
#endif
energy += correspondences[i].distance;
match_counter++;
}
}
}
energy /= static_cast<float>(match_counter);
energy = (match_counter == 0) ? -1.0f : energy;
std::cout << "\nENERGY: " << energy << "\t" << match_counter << "\n";
hipFree(d_src_indices);
hipFree(d_src_points);
free(correspondences);
hipFree(d_correspondences);
hipFree(d_target_points);
hipFree(d_target_indices);
return true;
}
void cudaGlobalAllocFree() {
// hipFree(d_src_indices);
// hipFree(d_src_points);
// free(src_points);
}
| 2c97ecb4e6aceccf888a4eb51b6c1d77113acdd8.cu |
#include <handheld_object_registration/projected_correspondences.h>
__device__ struct cuRect{
int x;
int y;
int width;
int height;
// cuRect(int i = 0, int j = 0, int w = 0, int h = 0) :
// x(i), y(j), width(w), height(h) {}
};
#define CUDA_ERROR_CHECK(process) { \
cudaAssert((process), __FILE__, __LINE__); \
}
void cudaAssert(cudaError_t code, const char *file, int line, bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n",
cudaGetErrorString(code), file, line);
if (abort) {
exit(code);
}
}
}
__host__ __device__ __align__(16)
int cuDivUp(
int a, int b) {
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
__device__ __forceinline__
void cuConditionROI(cuRect *rect, int width, int height) {
if (rect->x < 0) {
rect->x = 0;
}
if (rect->y < 0) {
rect->y = 0;
}
if ((rect->width + rect->x) > width) {
rect->x -= ((rect->width + rect->x) - width);
}
if ((rect->height + rect->y) > height) {
rect->y -= ((rect->height + rect->y) - height);
}
}
__device__ __forceinline__
float cuEuclideanDistance(float *src_pt, float *model_pt,
const int lenght, bool sqrt_dist = true) {
float sum = 0.0f;
for (int i = 0; i < lenght; i++) {
sum += ((src_pt[i] - model_pt[i]) * (src_pt[i] - model_pt[i]));
}
if (sqrt_dist) {
return sqrtf(sum);
} else {
return sum;
}
}
__global__ __forceinline__
void findCorrespondencesGPU(Correspondence * correspondences,
cuMat<float, NUMBER_OF_ELEMENTS> *d_src_points,
int *d_src_indices,
cuMat<float, NUMBER_OF_ELEMENTS> *d_model_points,
int *d_model_indices, const int im_width,
const int im_height, const int wsize) {
int t_idx = threadIdx.x + blockIdx.x * blockDim.x;
int t_idy = threadIdx.y + blockIdx.y * blockDim.y;
int offset = t_idx + t_idy * blockDim.x * gridDim.x;
//! temp
/*
if (offset == 0) {
for (int i = 0; i < im_width * im_height; i++) {
correspondences[i].query_index = -1;
correspondences[i].match_index = -1;
}
}
__syncthreads();
*/
if (offset < im_width * im_height) {
int model_index = d_model_indices[offset];
if (model_index != -1) {
cuRect rect;
rect.x = t_idx - wsize/2;
rect.y = t_idy - wsize/2;
rect.width = wsize;
rect.height = wsize;
cuConditionROI(&rect, im_width, im_height);
#ifdef _DEBUG
if (model_index == 10) {
printf("\n\nRECT: %d, %d, %d, %d\n",
rect.x, rect.y, rect.width, rect.height);
}
#endif
float model_pt[3];
model_pt[0] = d_model_points[model_index].data[0];
model_pt[1] = d_model_points[model_index].data[1];
model_pt[2] = d_model_points[model_index].data[2];
float min_dsm = FLT_MAX;
int min_ism = -1;
for (int l = rect.y; l < rect.y + rect.height; l++) {
for (int k = rect.x; k < rect.x + rect.width; k++) {
int src_index = d_src_indices[k + (l * im_width)];
if (src_index != -1) {
float src_pt[3];
src_pt[0] = d_src_points[src_index].data[0];
src_pt[1] = d_src_points[src_index].data[1];
src_pt[2] = d_src_points[src_index].data[2];
float dist = cuEuclideanDistance(src_pt, model_pt, 3);
if (dist >= 0.0f && dist < min_dsm && !isnan(dist)
&& dist < DISTANCE_THRESH) {
min_dsm = dist;
min_ism = src_index;
}
}
}
}
if (min_ism != -1 && min_dsm < DISTANCE_THRESH) {
correspondences[model_index].query_index = model_index;
correspondences[model_index].match_index = min_ism;
correspondences[model_index].distance = min_dsm;
} else {
correspondences[model_index].query_index = -1;
correspondences[model_index].match_index = -1;
}
}
}
}
// cuMat<float, NUMBER_OF_ELEMENTS> *src_points;
// cuMat<float, NUMBER_OF_ELEMENTS> *d_src_points;
// int *d_src_indices;
bool allocateCopyDataToGPU(
pcl::Correspondences &corr, float &energy,
bool allocate_src,
const pcl::PointCloud<PointTYPE>::Ptr source_points,
const ProjectionMap &src_projection,
const pcl::PointCloud<PointTYPE>::Ptr target_points,
const ProjectionMap &target_projection) {
if (source_points->empty() || target_points->empty()) {
printf("\033[31m EMPTY POINTCLOUD FOR CORRESPONDENCES \033[0m\n");
return false;
}
int *d_src_indices;
int *d_model_indices;
cuMat<float, NUMBER_OF_ELEMENTS> *d_model_points;
cuMat<float, NUMBER_OF_ELEMENTS> *d_src_points;
const int TGT_SIZE = std::max(
static_cast<int>(target_points->size()),
target_projection.width * target_projection.height);
cuMat<float, NUMBER_OF_ELEMENTS> model_points[TGT_SIZE];
const int SRC_POINT_SIZE = static_cast<int>(source_points->size());
// cuMat<float, NUMBER_OF_ELEMENTS> src_points[SRC_POINT_SIZE];
cuMat<float, NUMBER_OF_ELEMENTS> *src_points;
if (allocate_src) {
src_points = reinterpret_cast<cuMat<float, NUMBER_OF_ELEMENTS>* >(
std::malloc(sizeof(cuMat<float, NUMBER_OF_ELEMENTS>) * SRC_POINT_SIZE));
}
const int SRC_SIZE = IMAGE_WIDTH * IMAGE_HEIGHT;
int src_indices[SRC_SIZE];
int model_indices[SRC_SIZE];
int image_size = IMAGE_WIDTH * IMAGE_HEIGHT;
Correspondence *correspondences = reinterpret_cast<Correspondence*>(
std::malloc(sizeof(Correspondence) * image_size));
// #ifdef _OPENMP
// #pragma omp parallel for num_threads(8) collapse(2)
// #endif
for (int j = 0; j < target_projection.indices.rows; j++) {
for (int i = 0; i < target_projection.indices.cols; i++) {
int idx = i + (j * src_projection.indices.cols);
correspondences[idx].query_index = -1;
correspondences[idx].match_index = -1;
correspondences[idx].distance = FLT_MAX;
int index = target_projection.indices.at<int>(j, i);
if (index != -1) {
float x = target_points->points[index].x;
float y = target_points->points[index].y;
float z = target_points->points[index].z;
if (!isnan(x) && !isnan(y) && !isnan(z)) {
model_points[index].data[0] = x;
model_points[index].data[1] = y;
model_points[index].data[2] = z;
} else {
index = -1;
}
}
model_indices[idx] = index;
if (allocate_src) {
index = -1;
index = src_projection.indices.at<int>(j, i);
if (index != -1) {
float x = source_points->points[index].x;
float y = source_points->points[index].y;
float z = source_points->points[index].z;
if (!isnan(x) && !isnan(y) && !isnan(z)) {
src_points[index].data[0] = x;
src_points[index].data[1] = y;
src_points[index].data[2] = z;
} else {
index = -1;
}
}
src_indices[idx] = index;
}
}
}
int TMP_SIZE = TGT_SIZE * sizeof(cuMat<float, 3>);
cudaMalloc(reinterpret_cast<void**>(&d_model_points), TMP_SIZE);
cudaMemcpy(d_model_points, model_points, TMP_SIZE, cudaMemcpyHostToDevice);
int TIP_SIZE = SRC_SIZE * sizeof(int);
cudaMalloc(reinterpret_cast<void**>(&d_model_indices), TIP_SIZE);
cudaMemcpy(d_model_indices, model_indices, TIP_SIZE,
cudaMemcpyHostToDevice);
if (allocate_src) {
int SMP_SIZE = SRC_POINT_SIZE * sizeof(cuMat<float, 3>);
cudaMalloc(reinterpret_cast<void**>(&d_src_points), SMP_SIZE);
cudaMemcpy(d_src_points, src_points, SMP_SIZE, cudaMemcpyHostToDevice);
int SIP_SIZE = SRC_SIZE * sizeof(int);
cudaMalloc(reinterpret_cast<void**>(&d_src_indices), SIP_SIZE);
cudaMemcpy(d_src_indices, src_indices, SIP_SIZE,
cudaMemcpyHostToDevice);
}
Correspondence *d_correspondences;
cudaMalloc(reinterpret_cast<void**>(&d_correspondences),
sizeof(Correspondence) * image_size);
cudaMemcpy(d_correspondences, correspondences,
sizeof(Correspondence) * image_size, cudaMemcpyHostToDevice);
dim3 block_size(cuDivUp(IMAGE_WIDTH, GRID_SIZE),
cuDivUp(IMAGE_HEIGHT, GRID_SIZE));
dim3 grid_size(GRID_SIZE, GRID_SIZE);
findCorrespondencesGPU<<<block_size, grid_size>>>(
d_correspondences, d_src_points, d_src_indices, d_model_points,
d_model_indices, IMAGE_WIDTH, IMAGE_HEIGHT, target_projection.height);
// Correspondence *correspondences = reinterpret_cast<Correspondence*>(
// std::malloc(sizeof(Correspondence) * image_size));
cudaMemcpy(correspondences, d_correspondences,
sizeof(Correspondence) * image_size, cudaMemcpyDeviceToHost);
// const float max_value = 15.0f;
// const float min_value = -max_value;
energy = 0.0f;
int match_counter = 0;
for (int i = 0; i < image_size; i++) {
if ((correspondences[i].query_index > -1 &&
correspondences[i].query_index < image_size) &&
(correspondences[i].match_index > -1 &&
correspondences[i].match_index < image_size)) {
int model_index = correspondences[i].query_index;
int src_index = correspondences[i].match_index;
PointTYPE model_pt = target_points->points[model_index];
PointTYPE src_pt = source_points->points[src_index];
if (!isnan(model_pt.x) && !isnan(model_pt.y) && !isnan(model_pt.z) &&
!isnan(src_pt.x) && !isnan(src_pt.y) && !isnan(src_pt.z)) {
pcl::Correspondence c;
c.index_query = correspondences[i].query_index;
c.index_match = correspondences[i].match_index;
corr.push_back(c);
energy += correspondences[i].distance;
match_counter++;
}
}
}
energy /= static_cast<float>(match_counter);
energy = (match_counter == 0) ? -1.0f : energy;
free(src_points);
cudaFree(d_src_indices);
cudaFree(d_src_points);
free(correspondences);
cudaFree(d_correspondences);
cudaFree(d_model_points);
cudaFree(d_model_indices);
return true;
}
/**
* DEBUG
*/
__global__ __forceinline__
void estimateCorrespondencesKernel(Correspondence *correspondences,
float *d_model_points,
int *d_target_indices,
float *d_src_points,
int *d_src_indices,
const int image_size,
const int step_size,
const int wsize) {
int t_idx = threadIdx.x + blockIdx.x * blockDim.x;
int t_idy = threadIdx.y + blockIdx.y * blockDim.y;
int offset = t_idx + t_idy * blockDim.x * gridDim.x;
if (offset < image_size) {
/* for points increment by 12 and 1 for indices */
int model_index = d_target_indices[offset];
#ifdef _DEBUG
if (model_index != -1) {
printf("%d, %d %d \n", model_index, t_idx, t_idy);
}
#endif
if (model_index != -1) {
cuRect rect;
rect.x = t_idx - wsize/2;
rect.y = t_idy - wsize/2;
rect.width = wsize;
rect.height = wsize;
cuConditionROI(&rect, IMAGE_WIDTH, IMAGE_HEIGHT);
int point_index = model_index * POINT_ELEMENTS;
float model_pt[3];
model_pt[0] = d_model_points[point_index];
model_pt[1] = d_model_points[point_index + 1];
model_pt[2] = d_model_points[point_index + 2];
#ifdef _DEBUG
printf("INDEX: %d --- %3.4f, %3.4f, %3.4f\n",
model_index, model_pt[0], model_pt[1], model_pt[2]);
#endif
float min_dsm = FLT_MAX;
int min_ism = -1;
for (int l = rect.y; l < rect.y + rect.height; l++) {
for (int k = rect.x; k < rect.x + rect.width; k++) {
int src_index = d_src_indices[k + (l * IMAGE_WIDTH)];
if (src_index != -1) {
int spoint_index = src_index * POINT_ELEMENTS;
float src_pt[3];
src_pt[0] = d_src_points[spoint_index];
src_pt[1] = d_src_points[spoint_index + 1];
src_pt[2] = d_src_points[spoint_index + 2];
// src_pt[0] = d_src_points[src_index].data[0];
// src_pt[1] = d_src_points[src_index].data[1];
// src_pt[2] = d_src_points[src_index].data[2];
float dist = cuEuclideanDistance(src_pt, model_pt, 3, false);
if (dist >= 0.0f && dist < min_dsm && !isnan(dist)) {
min_dsm = dist;
min_ism = src_index;
}
#ifdef _DEBUG
if (model_index == 10) {
printf("SRC INDEX: %d --- %3.4f, %3.4f, %3.4f ---- %3.4f\n",
src_index, src_pt[0], src_pt[1], src_pt[2], dist);
printf("MATCH INDEX: %d --- %3.4f\n",
min_ism, min_dsm);
}
#endif
}
}
}
if (min_ism != -1 && min_dsm < DISTANCE_THRESH) {
correspondences[model_index].query_index = model_index;
correspondences[model_index].match_index = min_ism;
correspondences[model_index].distance = min_dsm;
} else {
correspondences[model_index].query_index = -1;
correspondences[model_index].match_index = -1;
}
}
}
}
__global__ __forceinline__
void test_kernel(int *data, int image_size) {
int t_idx = threadIdx.x + blockIdx.x * blockDim.x;
int t_idy = threadIdx.y + blockIdx.y * blockDim.y;
int offset = t_idx + t_idy * blockDim.x * gridDim.x;
if (offset < image_size) {
int index = data[offset];
if (index != -1) {
printf("%d, %d %d \n", index, t_idx, t_idy);
}
}
}
bool allocateCopyDataToGPU2(
pcl::Correspondences &corr, float &energy,
bool allocate_src,
const pcl::PointCloud<PointTYPE>::Ptr source_points,
const ProjectionMap &src_projection,
const pcl::PointCloud<PointTYPE>::Ptr target_points,
const ProjectionMap &target_projection) {
if (source_points->empty() || target_points->empty()) {
printf("\033[31m EMPTY POINTCLOUD FOR CORRESPONDENCES \033[0m\n");
return false;
}
const int IN_TSIZE = static_cast<int>(target_points->size());
const int IMG_SIZE = target_projection.indices.cols *
target_projection.indices.rows;
dim3 block_size(cuDivUp(IMAGE_WIDTH, GRID_SIZE),
cuDivUp(IMAGE_HEIGHT, GRID_SIZE));
dim3 grid_size(GRID_SIZE, GRID_SIZE);
//! copy model data
float *d_target_points;
cudaMalloc(reinterpret_cast<void**>(&d_target_points),
IN_TSIZE * sizeof(float) * POINT_ELEMENTS);
cudaMemcpy(d_target_points, target_points->points.data(),
IN_TSIZE * sizeof(float) * POINT_ELEMENTS,
cudaMemcpyHostToDevice);
const int TARGET_BYTE = target_projection.indices.step *
target_projection.indices.rows;
int *d_target_indices;
cudaMalloc(reinterpret_cast<void**>(&d_target_indices), TARGET_BYTE);
// int *tdata = reinterpret_cast<int*>(target_projection.indices.data);
cudaMemcpy(d_target_indices,
reinterpret_cast<int*>(target_projection.indices.data),
TARGET_BYTE, cudaMemcpyHostToDevice);
float *d_src_points;
int *d_src_indices;
if (allocate_src) {
// cudaFree(d_src_points);
// cudaFree(d_src_indices);
const int IN_SSIZE = static_cast<int>(source_points->size());
cudaMalloc(reinterpret_cast<void**>(&d_src_points),
IN_SSIZE * sizeof(float) * POINT_ELEMENTS);
cudaMemcpy(d_src_points, source_points->points.data(),
IN_SSIZE * sizeof(float) * POINT_ELEMENTS,
cudaMemcpyHostToDevice);
const int SRC_BYTE = src_projection.indices.step *
src_projection.indices.rows;
cudaMalloc(reinterpret_cast<void**>(&d_src_indices), SRC_BYTE);
cudaMemcpy(d_src_indices, reinterpret_cast<int*>(
src_projection.indices.data), SRC_BYTE,
cudaMemcpyHostToDevice);
}
Correspondence *d_correspondences;
cudaMalloc(reinterpret_cast<void**>(&d_correspondences),
sizeof(Correspondence) * IN_TSIZE);
estimateCorrespondencesKernel<<<block_size, grid_size>>>(
d_correspondences, d_target_points, d_target_indices,
d_src_points, d_src_indices, IMG_SIZE,
target_projection.indices.step, target_projection.height);
// for (int i = 0; i < target_points->size(); i++) {
// std::cout << target_points->points[i] << "\n";
// }
// return -1;
Correspondence *correspondences = reinterpret_cast<Correspondence*>(
std::malloc(sizeof(Correspondence) * IN_TSIZE));
cudaMemcpy(correspondences, d_correspondences,
sizeof(Correspondence) * IN_TSIZE, cudaMemcpyDeviceToHost);
energy = 0.0f;
int match_counter = 0;
for (int i = 0; i < IN_TSIZE; i++) {
if ((correspondences[i].query_index > -1 &&
correspondences[i].query_index < IMG_SIZE) &&
(correspondences[i].match_index > -1 &&
correspondences[i].match_index < IMG_SIZE)) {
int model_index = correspondences[i].query_index;
int src_index = correspondences[i].match_index;
PointTYPE model_pt = target_points->points[model_index];
PointTYPE src_pt = source_points->points[src_index];
if (!isnan(model_pt.x) && !isnan(model_pt.y) && !isnan(model_pt.z) &&
!isnan(src_pt.x) && !isnan(src_pt.y) && !isnan(src_pt.z)) {
pcl::Correspondence c;
c.index_query = correspondences[i].query_index;
c.index_match = correspondences[i].match_index;
corr.push_back(c);
#ifdef _DEBUG
std::cout << correspondences[i].query_index << ", ";
std::cout << correspondences[i].match_index << ", ";
std::cout << correspondences[i].distance << "\n";
#endif
energy += correspondences[i].distance;
match_counter++;
}
}
}
energy /= static_cast<float>(match_counter);
energy = (match_counter == 0) ? -1.0f : energy;
std::cout << "\nENERGY: " << energy << "\t" << match_counter << "\n";
cudaFree(d_src_indices);
cudaFree(d_src_points);
free(correspondences);
cudaFree(d_correspondences);
cudaFree(d_target_points);
cudaFree(d_target_indices);
return true;
}
void cudaGlobalAllocFree() {
// cudaFree(d_src_indices);
// cudaFree(d_src_points);
// free(src_points);
}
|
e43a0268cadfb6f08e50dbccd2f64ea0205de1c8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <quda_internal.h>
#include <color_spinor_field.h>
#include <blas_quda.h>
#include <test_util.h>
#include <face_quda.h>
// include because of nasty globals used in the tests
#include <dslash_util.h>
// google test
#include <gtest.h>
extern QudaDslashType dslash_type;
extern QudaInverterType inv_type;
extern int nvec;
extern bool tune;
extern int device;
extern int xdim;
extern int ydim;
extern int zdim;
extern int tdim;
extern int gridsize_from_cmdline[];
extern int niter;
extern bool tune;
extern bool verify_results;
extern int Nsrc;
extern int Msrc;
extern void usage(char** );
const int Nkernels = 37;
using namespace quda;
ColorSpinorField *xH, *yH, *zH, *wH, *vH, *hH, *lH;
ColorSpinorField *xD, *yD, *zD, *wD, *vD, *hD, *lD, *xmD, *ymD, *zmD;
std::vector<cpuColorSpinorField*> xmH;
std::vector<cpuColorSpinorField*> ymH;
std::vector<cpuColorSpinorField*> zmH;
int Nspin;
int Ncolor;
void setPrec(ColorSpinorParam ¶m, const QudaPrecision precision)
{
param.precision = precision;
if (Nspin == 1 || Nspin == 2 || precision == QUDA_DOUBLE_PRECISION) {
param.fieldOrder = QUDA_FLOAT2_FIELD_ORDER;
} else {
param.fieldOrder = QUDA_FLOAT4_FIELD_ORDER;
}
}
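// For example, with Nspin == 4 fields, half and single precision select
// QUDA_FLOAT4_FIELD_ORDER, while double precision, or the staggered/coarse
// Nspin == 1/2 cases, always use QUDA_FLOAT2_FIELD_ORDER.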
void
display_test_info()
{
printfQuda("running the following test:\n");
printfQuda("S_dimension T_dimension Nspin Ncolor\n");
printfQuda("%3d /%3d / %3d %3d %d %d\n", xdim, ydim, zdim, tdim, Nspin, Ncolor);
printfQuda("Grid partition info: X Y Z T\n");
printfQuda(" %d %d %d %d\n",
dimPartitioned(0),
dimPartitioned(1),
dimPartitioned(2),
dimPartitioned(3));
return;
}
int Nprec = 3;
bool skip_kernel(int precision, int kernel) {
if ( Nspin == 2 && precision == 0) {
// avoid half precision tests if doing coarse fields
return true;
} else if (Nspin == 2 && kernel == 1) {
// avoid low-precision copy if doing coarse fields
return true;
} else if (Ncolor != 3 && (kernel == 31 || kernel == 32)) {
// only benchmark heavy-quark norm if doing 3 colors
return true;
} else if ((Nprec < 3) && (kernel == 0)) {
// only benchmark high-precision copy() if double is supported
return true;
}
return false;
}
void initFields(int prec)
{
// precisions used for the source field in the copyCuda() benchmark
QudaPrecision high_aux_prec = QUDA_INVALID_PRECISION;
QudaPrecision low_aux_prec = QUDA_INVALID_PRECISION;
ColorSpinorParam param;
param.nColor = Ncolor;
param.nSpin = Nspin;
param.nDim = 4; // number of spacetime dimensions
param.pad = 0; // padding must be zero for cpu fields
param.siteSubset = QUDA_PARITY_SITE_SUBSET;
if (param.siteSubset == QUDA_PARITY_SITE_SUBSET) param.x[0] = xdim/2;
else param.x[0] = xdim;
param.x[1] = ydim;
param.x[2] = zdim;
param.x[3] = tdim;
param.siteOrder = QUDA_EVEN_ODD_SITE_ORDER;
param.gammaBasis = QUDA_DEGRAND_ROSSI_GAMMA_BASIS;
param.precision = QUDA_DOUBLE_PRECISION;
param.fieldOrder = QUDA_SPACE_SPIN_COLOR_FIELD_ORDER;
param.create = QUDA_ZERO_FIELD_CREATE;
vH = new cpuColorSpinorField(param);
wH = new cpuColorSpinorField(param);
xH = new cpuColorSpinorField(param);
yH = new cpuColorSpinorField(param);
zH = new cpuColorSpinorField(param);
hH = new cpuColorSpinorField(param);
lH = new cpuColorSpinorField(param);
// create composite fields
// xmH = new cpuColorSpinorField(param);
// ymH = new cpuColorSpinorField(param);
xmH.reserve(Nsrc);
for (int cid = 0; cid < Nsrc; cid++) xmH.push_back(new cpuColorSpinorField(param));
ymH.reserve(Msrc);
for (int cid = 0; cid < Msrc; cid++) ymH.push_back(new cpuColorSpinorField(param));
zmH.reserve(Nsrc);
for (int cid = 0; cid < Nsrc; cid++) zmH.push_back(new cpuColorSpinorField(param));
static_cast<cpuColorSpinorField*>(vH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
static_cast<cpuColorSpinorField*>(wH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
static_cast<cpuColorSpinorField*>(xH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
static_cast<cpuColorSpinorField*>(yH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
static_cast<cpuColorSpinorField*>(zH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
static_cast<cpuColorSpinorField*>(hH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
static_cast<cpuColorSpinorField*>(lH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
for(int i=0; i<Nsrc; i++){
static_cast<cpuColorSpinorField*>(xmH[i])->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
}
for(int i=0; i<Msrc; i++){
static_cast<cpuColorSpinorField*>(ymH[i])->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
}
// Now set the parameters for the cuda fields
//param.pad = xdim*ydim*zdim/2;
if (param.nSpin == 4) param.gammaBasis = QUDA_UKQCD_GAMMA_BASIS;
param.create = QUDA_ZERO_FIELD_CREATE;
switch(prec) {
case 0:
setPrec(param, QUDA_HALF_PRECISION);
high_aux_prec = QUDA_DOUBLE_PRECISION;
low_aux_prec = QUDA_SINGLE_PRECISION;
break;
case 1:
setPrec(param, QUDA_SINGLE_PRECISION);
high_aux_prec = QUDA_DOUBLE_PRECISION;
low_aux_prec = QUDA_HALF_PRECISION;
break;
case 2:
setPrec(param, QUDA_DOUBLE_PRECISION);
high_aux_prec = QUDA_SINGLE_PRECISION;
low_aux_prec = QUDA_HALF_PRECISION;
break;
default:
errorQuda("Precision option not defined");
}
checkCudaError();
vD = new cudaColorSpinorField(param);
wD = new cudaColorSpinorField(param);
xD = new cudaColorSpinorField(param);
yD = new cudaColorSpinorField(param);
zD = new cudaColorSpinorField(param);
param.is_composite = true;
param.is_component = false;
// create composite fields
param.composite_dim = Nsrc;
xmD = new cudaColorSpinorField(param);
param.composite_dim = Msrc;
ymD = new cudaColorSpinorField(param);
param.composite_dim = Nsrc;
zmD = new cudaColorSpinorField(param);
param.is_composite = false;
param.is_component = false;
param.composite_dim = 1;
setPrec(param, high_aux_prec);
hD = new cudaColorSpinorField(param);
setPrec(param, low_aux_prec);
lD = new cudaColorSpinorField(param);
// check for successful allocation
checkCudaError();
// only do copy if not doing half precision with mg
bool flag = !(param.nSpin == 2 &&
(prec == 0 || low_aux_prec == QUDA_HALF_PRECISION) );
if ( flag ) {
*vD = *vH;
*wD = *wH;
*xD = *xH;
*yD = *yH;
*zD = *zH;
*hD = *hH;
*lD = *lH;
// for (int i=0; i < Nsrc; i++){
// xmD->Component(i) = *(xmH[i]);
// ymD->Component(i) = *(ymH[i]);
// }
// *ymD = *ymH;
}
}
void freeFields()
{
// release memory
delete vD;
delete wD;
delete xD;
delete yD;
delete zD;
delete hD;
delete lD;
delete xmD;
delete ymD;
delete zmD;
// release memory
delete vH;
delete wH;
delete xH;
delete yH;
delete zH;
delete hH;
delete lH;
for (int i=0; i < Nsrc; i++) delete xmH[i];
for (int i=0; i < Msrc; i++) delete ymH[i];
for (int i=0; i < Nsrc; i++) delete zmH[i];
xmH.clear();
ymH.clear();
zmH.clear();
}
double benchmark(int kernel, const int niter) {
double a, b, c;
quda::Complex a2, b2, c2;
quda::Complex * A = new quda::Complex[Nsrc*Msrc];
quda::Complex * B = new quda::Complex[Nsrc*Msrc];
quda::Complex * C = new quda::Complex[Nsrc*Msrc];
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
{
switch (kernel) {
case 0:
for (int i=0; i < niter; ++i) blas::copy(*yD, *hD);
break;
case 1:
for (int i=0; i < niter; ++i) blas::copy(*yD, *lD);
break;
case 2:
for (int i=0; i < niter; ++i) blas::axpby(a, *xD, b, *yD);
break;
case 3:
for (int i=0; i < niter; ++i) blas::xpy(*xD, *yD);
break;
case 4:
for (int i=0; i < niter; ++i) blas::axpy(a, *xD, *yD);
break;
case 5:
for (int i=0; i < niter; ++i) blas::xpay(*xD, a, *yD);
break;
case 6:
for (int i=0; i < niter; ++i) blas::mxpy(*xD, *yD);
break;
case 7:
for (int i=0; i < niter; ++i) blas::ax(a, *xD);
break;
case 8:
for (int i=0; i < niter; ++i) blas::caxpy(a2, *xD, *yD);
break;
case 9:
for (int i=0; i < niter; ++i) blas::caxpby(a2, *xD, b2, *yD);
break;
case 10:
for (int i=0; i < niter; ++i) blas::cxpaypbz(*xD, a2, *yD, b2, *zD);
break;
case 11:
for (int i=0; i < niter; ++i) blas::axpyBzpcx(a, *xD, *yD, b, *zD, c);
break;
case 12:
for (int i=0; i < niter; ++i) blas::axpyZpbx(a, *xD, *yD, *zD, b);
break;
case 13:
for (int i=0; i < niter; ++i) blas::caxpbypzYmbw(a2, *xD, b2, *yD, *zD, *wD);
break;
case 14:
for (int i=0; i < niter; ++i) blas::cabxpyAx(a, b2, *xD, *yD);
break;
case 15:
for (int i=0; i < niter; ++i) blas::caxpbypz(a2, *xD, b2, *yD, *zD);
break;
case 16:
for (int i=0; i < niter; ++i) blas::caxpbypczpw(a2, *xD, b2, *yD, c2, *zD, *wD);
break;
case 17:
for (int i=0; i < niter; ++i) blas::caxpyXmaz(a2, *xD, *yD, *zD);
break;
// double
case 18:
for (int i=0; i < niter; ++i) blas::norm2(*xD);
break;
case 19:
for (int i=0; i < niter; ++i) blas::reDotProduct(*xD, *yD);
break;
case 20:
for (int i=0; i < niter; ++i) blas::axpyNorm(a, *xD, *yD);
break;
case 21:
for (int i=0; i < niter; ++i) blas::xmyNorm(*xD, *yD);
break;
case 22:
for (int i=0; i < niter; ++i) blas::caxpyNorm(a2, *xD, *yD);
break;
case 23:
for (int i=0; i < niter; ++i) blas::caxpyXmazNormX(a2, *xD, *yD, *zD);
break;
case 24:
for (int i=0; i < niter; ++i) blas::cabxpyAxNorm(a, b2, *xD, *yD);
break;
// double2
case 25:
for (int i=0; i < niter; ++i) blas::cDotProduct(*xD, *yD);
break;
case 26:
for (int i=0; i < niter; ++i) blas::xpaycDotzy(*xD, a, *yD, *zD);
break;
case 27:
for (int i=0; i < niter; ++i) blas::caxpyDotzy(a2, *xD, *yD, *zD);
break;
// double3
case 28:
for (int i=0; i < niter; ++i) blas::cDotProductNormA(*xD, *yD);
break;
case 29:
for (int i=0; i < niter; ++i) blas::cDotProductNormB(*xD, *yD);
break;
case 30:
for (int i=0; i < niter; ++i) blas::caxpbypzYmbwcDotProductUYNormY(a2, *xD, b2, *yD, *zD, *wD, *vD);
break;
case 31:
for (int i=0; i < niter; ++i) blas::HeavyQuarkResidualNorm(*xD, *yD);
break;
case 32:
for (int i=0; i < niter; ++i) blas::xpyHeavyQuarkResidualNorm(*xD, *yD, *zD);
break;
case 33:
for (int i=0; i < niter; ++i) blas::tripleCGReduction(*xD, *yD, *zD);
break;
case 34:
for (int i=0; i < niter; ++i) blas::tripleCGUpdate(a, b, *xD, *yD, *zD, *wD);
break;
case 35:
for (int i=0; i < niter; ++i) blas::caxpy(A, *xmD,* ymD);
break;
case 36:
for (int i=0; i < niter; ++i) blas::axpyBzpcx((double*)A, xmD->Components(), zmD->Components(), (double*)B, *yD, (double*)C);
break;
default:
errorQuda("Undefined blas kernel %d\n", kernel);
}
}
hipEventRecord(end, 0);
hipEventSynchronize(end);
float runTime;
hipEventElapsedTime(&runTime, start, end);
hipEventDestroy(start);
hipEventDestroy(end);
delete[] A;
delete[] B;
delete[] C;
double secs = runTime / 1000;
return secs;
}
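// The event-based timing pattern in benchmark() can be factored into a small
// helper. This is an illustrative sketch only (the timeSection name is an
// assumption, not part of the original test); it uses the same hipEvent_*
// calls as above and returns seconds, like benchmark() does.
template <typename F>
static double timeSection(F &&body)
{
  hipEvent_t t0, t1;
  hipEventCreate(&t0);
  hipEventCreate(&t1);
  hipEventRecord(t0, 0);
  body();                      // the timed work, issued on the default stream
  hipEventRecord(t1, 0);
  hipEventSynchronize(t1);
  float ms = 0.0f;
  hipEventElapsedTime(&ms, t0, t1);
  hipEventDestroy(t0);
  hipEventDestroy(t1);
  return ms / 1000.0;
}
// Hypothetical usage: double secs = timeSection([&]{ blas::xpy(*xD, *yD); });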
#define ERROR(a) fabs(blas::norm2(*a##D) - blas::norm2(*a##H)) / blas::norm2(*a##H)
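// ERROR(a) token-pastes the field name, so ERROR(y) expands to
//   fabs(blas::norm2(*yD) - blas::norm2(*yH)) / blas::norm2(*yH)
// i.e. the relative deviation of the device result from the host reference.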
double test(int kernel) {
double a = M_PI, b = M_PI*exp(1.0), c = sqrt(M_PI);
quda::Complex a2(a, b), b2(b, -c), c2(a+b, c*a);
double error = 0;
quda::Complex * A = new quda::Complex[Nsrc*Msrc];
quda::Complex * B = new quda::Complex[Nsrc*Msrc];
quda::Complex * C = new quda::Complex[Nsrc*Msrc];
for(int i=0; i < Nsrc*Msrc; i++){
A[i] = a2* (1.0*((i/Nsrc) + i)) + b2 * (1.0*i) + c2 *(1.0*(Nsrc*Msrc/2-i));
B[i] = a2* (1.0*((i/Nsrc) + i)) - b2 * (M_PI*i) + c2 *(1.0*(Nsrc*Msrc/2-i));
C[i] = a2* (1.0*((M_PI/Nsrc) + i)) + b2 * (1.0*i) + c2 *(1.0*(Nsrc*Msrc/2-i));
}
// A[0] = a2;
// A[1] = 0.;
// A[2] = 0.;
// A[3] = 0.;
switch (kernel) {
case 0:
*hD = *hH;
blas::copy(*yD, *hD);
blas::copy(*yH, *hH);
error = ERROR(y);
break;
case 1:
*lD = *lH;
blas::copy(*yD, *lD);
blas::copy(*yH, *lH);
error = ERROR(y);
break;
case 2:
*xD = *xH;
*yD = *yH;
blas::axpby(a, *xD, b, *yD);
blas::axpby(a, *xH, b, *yH);
error = ERROR(y);
break;
case 3:
*xD = *xH;
*yD = *yH;
blas::xpy(*xD, *yD);
blas::xpy(*xH, *yH);
error = ERROR(y);
break;
case 4:
*xD = *xH;
*yD = *yH;
blas::axpy(a, *xD, *yD);
blas::axpy(a, *xH, *yH);
*zH = *yD;
error = ERROR(y);
break;
case 5:
*xD = *xH;
*yD = *yH;
blas::xpay(*xD, a, *yD);
blas::xpay(*xH, a, *yH);
error = ERROR(y);
break;
case 6:
*xD = *xH;
*yD = *yH;
blas::mxpy(*xD, *yD);
blas::mxpy(*xH, *yH);
error = ERROR(y);
break;
case 7:
*xD = *xH;
blas::ax(a, *xD);
blas::ax(a, *xH);
error = ERROR(x);
break;
case 8:
*xD = *xH;
*yD = *yH;
blas::caxpy(a2, *xD, *yD);
blas::caxpy(a2, *xH, *yH);
error = ERROR(y);
break;
case 9:
*xD = *xH;
*yD = *yH;
blas::caxpby(a2, *xD, b2, *yD);
blas::caxpby(a2, *xH, b2, *yH);
error = ERROR(y);
break;
case 10:
*xD = *xH;
*yD = *yH;
*zD = *zH;
blas::cxpaypbz(*xD, a2, *yD, b2, *zD);
blas::cxpaypbz(*xH, a2, *yH, b2, *zH);
error = ERROR(z);
break;
case 11:
*xD = *xH;
*yD = *yH;
*zD = *zH;
blas::axpyBzpcx(a, *xD, *yD, b, *zD, c);
blas::axpyBzpcx(a, *xH, *yH, b, *zH, c);
error = ERROR(x) + ERROR(y);
break;
case 12:
*xD = *xH;
*yD = *yH;
*zD = *zH;
blas::axpyZpbx(a, *xD, *yD, *zD, b);
blas::axpyZpbx(a, *xH, *yH, *zH, b);
error = ERROR(x) + ERROR(y);
break;
case 13:
*xD = *xH;
*yD = *yH;
*zD = *zH;
*wD = *wH;
blas::caxpbypzYmbw(a2, *xD, b2, *yD, *zD, *wD);
blas::caxpbypzYmbw(a2, *xH, b2, *yH, *zH, *wH);
error = ERROR(z) + ERROR(y);
break;
case 14:
*xD = *xH;
*yD = *yH;
blas::cabxpyAx(a, b2, *xD, *yD);
blas::cabxpyAx(a, b2, *xH, *yH);
error = ERROR(y) + ERROR(x);
break;
case 15:
*xD = *xH;
*yD = *yH;
*zD = *zH;
{blas::caxpbypz(a2, *xD, b2, *yD, *zD);
blas::caxpbypz(a2, *xH, b2, *yH, *zH);
error = ERROR(z); }
break;
case 16:
*xD = *xH;
*yD = *yH;
*zD = *zH;
*wD = *wH;
{blas::caxpbypczpw(a2, *xD, b2, *yD, c2, *zD, *wD);
blas::caxpbypczpw(a2, *xH, b2, *yH, c2, *zH, *wH);
error = ERROR(w); }
break;
case 17:
*xD = *xH;
*yD = *yH;
*zD = *zH;
{blas::caxpyXmaz(a, *xD, *yD, *zD);
blas::caxpyXmaz(a, *xH, *yH, *zH);
error = ERROR(y) + ERROR(x);}
break;
// double
case 18:
*xD = *xH;
*yH = *xD;
error = fabs(blas::norm2(*xD) - blas::norm2(*xH)) / blas::norm2(*xH);
break;
case 19:
*xD = *xH;
*yD = *yH;
error = fabs(blas::reDotProduct(*xD, *yD) - blas::reDotProduct(*xH, *yH)) / fabs(blas::reDotProduct(*xH, *yH));
break;
case 20:
*xD = *xH;
*yD = *yH;
{double d = blas::axpyNorm(a, *xD, *yD);
double h = blas::axpyNorm(a, *xH, *yH);
error = ERROR(y) + fabs(d-h)/fabs(h);}
break;
case 21:
*xD = *xH;
*yD = *yH;
{double d = blas::xmyNorm(*xD, *yD);
double h = blas::xmyNorm(*xH, *yH);
error = ERROR(y) + fabs(d-h)/fabs(h);}
break;
case 22:
*xD = *xH;
*yD = *yH;
{double d = blas::caxpyNorm(a, *xD, *yD);
double h = blas::caxpyNorm(a, *xH, *yH);
error = ERROR(y) + fabs(d-h)/fabs(h);}
break;
case 23:
*xD = *xH;
*yD = *yH;
*zD = *zH;
{double d = blas::caxpyXmazNormX(a, *xD, *yD, *zD);
double h = blas::caxpyXmazNormX(a, *xH, *yH, *zH);
error = ERROR(y) + ERROR(x) + fabs(d-h)/fabs(h);}
break;
case 24:
*xD = *xH;
*yD = *yH;
{double d = blas::cabxpyAxNorm(a, b2, *xD, *yD);
double h = blas::cabxpyAxNorm(a, b2, *xH, *yH);
error = ERROR(x) + ERROR(y) + fabs(d-h)/fabs(h);}
break;
// double2
case 25:
*xD = *xH;
*yD = *yH;
error = abs(blas::cDotProduct(*xD, *yD) - blas::cDotProduct(*xH, *yH)) / abs(blas::cDotProduct(*xH, *yH));
break;
case 26:
*xD = *xH;
*yD = *yH;
*zD = *zH;
{ quda::Complex d = blas::xpaycDotzy(*xD, a, *yD, *zD);
quda::Complex h = blas::xpaycDotzy(*xH, a, *yH, *zH);
error = fabs(blas::norm2(*yD) - blas::norm2(*yH)) / blas::norm2(*yH) + abs(d-h)/abs(h);
}
break;
case 27:
*xD = *xH;
*yD = *yH;
*zD = *zH;
{quda::Complex d = blas::caxpyDotzy(a, *xD, *yD, *zD);
quda::Complex h = blas::caxpyDotzy(a, *xH, *yH, *zH);
error = ERROR(y) + abs(d-h)/abs(h);}
break;
// double3
case 28:
*xD = *xH;
*yD = *yH;
{ double3 d = blas::cDotProductNormA(*xD, *yD);
double3 h = blas::cDotProductNormA(*xH, *yH);
error = fabs(d.x - h.x) / fabs(h.x) + fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
break;
case 29:
*xD = *xH;
*yD = *yH;
{ double3 d = blas::cDotProductNormB(*xD, *yD);
double3 h = blas::cDotProductNormB(*xH, *yH);
error = fabs(d.x - h.x) / fabs(h.x) + fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
break;
case 30:
*xD = *xH;
*yD = *yH;
*zD = *zH;
*wD = *wH;
*vD = *vH;
{ double3 d = blas::caxpbypzYmbwcDotProductUYNormY(a2, *xD, b2, *yD, *zD, *wD, *vD);
double3 h = blas::caxpbypzYmbwcDotProductUYNormY(a2, *xH, b2, *yH, *zH, *wH, *vH);
error = ERROR(z) + ERROR(y) + fabs(d.x - h.x) / fabs(h.x) +
fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
break;
case 31:
*xD = *xH;
*yD = *yH;
{ double3 d = blas::HeavyQuarkResidualNorm(*xD, *yD);
double3 h = blas::HeavyQuarkResidualNorm(*xH, *yH);
error = fabs(d.x - h.x) / fabs(h.x) +
fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
break;
case 32:
*xD = *xH;
*yD = *yH;
*zD = *zH;
{ double3 d = blas::xpyHeavyQuarkResidualNorm(*xD, *yD, *zD);
double3 h = blas::xpyHeavyQuarkResidualNorm(*xH, *yH, *zH);
error = ERROR(y) + fabs(d.x - h.x) / fabs(h.x) +
fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
break;
case 33:
*xD = *xH;
*yD = *yH;
*zD = *zH;
{ double3 d = blas::tripleCGReduction(*xD, *yD, *zD);
double3 h = make_double3(blas::norm2(*xH), blas::norm2(*yH), blas::reDotProduct(*yH, *zH));
error = fabs(d.x - h.x) / fabs(h.x) +
fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
break;
case 34:
*xD = *xH;
*yD = *yH;
*zD = *zH;
*wD = *wH;
{ blas::tripleCGUpdate(a, b, *xD, *yD, *zD, *wD);
blas::tripleCGUpdate(a, b, *xH, *yH, *zH, *wH);
error = ERROR(y) + ERROR(z) + ERROR(w); }
break;
case 35:
for (int i=0; i < Nsrc; i++) xmD->Component(i) = *(xmH[i]);
for (int i=0; i < Msrc; i++) ymD->Component(i) = *(ymH[i]);
blas::caxpy(A, *xmD, *ymD);
for (int i=0; i < Nsrc; i++){
for(int j=0; j < Msrc; j++){
blas::caxpy(A[Msrc*i+j], *(xmH[i]), *(ymH[j]));
}
}
error = 0;
for (int i=0; i < Msrc; i++){
error+= fabs(blas::norm2((ymD->Component(i))) - blas::norm2(*(ymH[i]))) / blas::norm2(*(ymH[i]));
}
error/= Msrc;
break;
case 36:
for (int i=0; i < Nsrc; i++) {
xmD->Component(i) = *(xmH[i]);
zmD->Component(i) = *(zmH[i]);
}
*yD = *yH;
blas::axpyBzpcx((double*)A, xmD->Components(), zmD->Components(), (double*)B, *yD, (const double*)C);
for (int i=0; i<Nsrc; i++) {
blas::axpyBzpcx(((double*)A)[i], *xmH[i], *zmH[i], ((double*)B)[i], *yH, ((double*)C)[i]);
}
error = 0;
for (int i=0; i < Nsrc; i++){
error+= fabs(blas::norm2((xmD->Component(i))) - blas::norm2(*(xmH[i]))) / blas::norm2(*(xmH[i]));
//error+= fabs(blas::norm2((zmD->Component(i))) - blas::norm2(*(zmH[i]))) / blas::norm2(*(zmH[i]));
}
error/= Nsrc;
break;
default:
errorQuda("Undefined blas kernel %d\n", kernel);
}
delete[] A;
delete[] B;
delete[] C;
return error;
}
const char *prec_str[] = {"half", "single", "double"};
const char *names[] = {
"copyHS",
"copyLS",
"axpby",
"xpy",
"axpy",
"xpay",
"mxpy",
"ax",
"caxpy",
"caxpby",
"cxpaypbz",
"axpyBzpcx",
"axpyZpbx",
"caxpbypzYmbw",
"cabxpyAx",
"caxpbypz",
"caxpbypczpw",
"caxpyXmaz",
"norm",
"reDotProduct",
"axpyNorm",
"xmyNorm",
"caxpyNorm",
"caxpyXmazNormX",
"cabxpyAxNorm",
"cDotProduct",
"xpaycDotzy",
"caxpyDotzy",
"cDotProductNormA",
"cDotProductNormB",
"caxpbypzYmbwcDotProductUYNormY",
"HeavyQuarkResidualNorm",
"xpyHeavyQuarkResidualNorm",
"tripleCGReduction",
"tripleCGUpdate",
"caxpy (block)",
"axpyBzpcx (block)",
};
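// The entries above are indexed by the same kernel number used in benchmark()
// and test(), so the array length must stay equal to Nkernels (37) and the
// ordering must match the switch cases; e.g. names[8] == "caxpy" corresponds
// to case 8 in both switches.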
int main(int argc, char** argv)
{
for (int i = 1; i < argc; i++){
if(process_command_line_option(argc, argv, &i) == 0){
continue;
}
printfQuda("ERROR: Invalid option:%s\n", argv[i]);
usage(argv);
}
// override spin setting if mg solver is set to test coarse grids
if (inv_type == QUDA_MG_INVERTER) {
Nspin = 2;
Ncolor = nvec;
} else {
// set spin according to the type of dslash
Nspin = (dslash_type == QUDA_ASQTAD_DSLASH ||
dslash_type == QUDA_STAGGERED_DSLASH) ? 1 : 4;
Ncolor = 3;
}
setSpinorSiteSize(24);
initComms(argc, argv, gridsize_from_cmdline);
display_test_info();
initQuda(device);
// enable the tuning
setTuning(tune ? QUDA_TUNE_YES : QUDA_TUNE_NO);
setVerbosity(QUDA_SILENT);
for (int prec = 0; prec < Nprec; prec++) {
if (Nspin == 2 && prec == 0) continue;
printfQuda("\nBenchmarking %s precision with %d iterations...\n\n", prec_str[prec], niter);
initFields(prec);
for (int kernel = 0; kernel < Nkernels; kernel++) {
if (skip_kernel(prec, kernel)) continue;
// do the initial tune
benchmark(kernel, 1);
// now rerun with more iterations to get accurate speed measurements
quda::blas::flops = 0;
quda::blas::bytes = 0;
double secs = benchmark(kernel, niter);
double gflops = (quda::blas::flops*1e-9)/(secs);
double gbytes = quda::blas::bytes/(secs*1e9);
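      // For example, 5.0e11 flops and 2.0e12 bytes accumulated over 0.25 s
      // report as 2000.0 Gflop/s and 8000.0 GB/s here.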
printfQuda("%-31s: Gflop/s = %6.1f, GB/s = %6.1f\n", names[kernel], gflops, gbytes);
}
freeFields();
}
// clear the error state
hipGetLastError();
// lastly check for correctness
if (verify_results) {
::testing::InitGoogleTest(&argc, argv);
if (RUN_ALL_TESTS() != 0) warningQuda("Tests failed");
}
endQuda();
finalizeComms();
}
// The following tests each kernel at each precision using the google testing framework
class BlasTest : public ::testing::TestWithParam<int2> {
protected:
int2 param;
public:
virtual ~BlasTest() { }
virtual void SetUp() {
param = GetParam();
initFields(param.x);
}
virtual void TearDown() { freeFields(); }
virtual void NormalExit() { printf("monkey\n"); }
};
TEST_P(BlasTest, verify) {
int prec = param.x;
int kernel = param.y;
// certain tests will fail to run for coarse grids so mark these as
// failed without running
double deviation = skip_kernel(prec,kernel) ? 1.0 : test(kernel);
printfQuda("%-35s error = %e\n", names[kernel], deviation);
double tol = (prec == 2 ? 1e-11 : (prec == 1 ? 1e-5 : 1e-3));
tol = (kernel < 2) ? 1e-4 : tol; // use different tolerance for copy
EXPECT_LE(deviation, tol) << "CPU and CUDA implementations do not agree";
}
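// Each test instance is parameterised by an int2 whose .x encodes the
// precision index (0 = half, 1 = single, 2 = double, matching prec_str) and
// whose .y encodes the kernel number; e.g. make_int2(1,8) runs the
// single-precision caxpy check.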
// half precision
INSTANTIATE_TEST_CASE_P(copyHS_half, BlasTest, ::testing::Values( make_int2(0,0) ));
INSTANTIATE_TEST_CASE_P(copyLS_half, BlasTest, ::testing::Values( make_int2(0,1) ));
INSTANTIATE_TEST_CASE_P(axpby_half, BlasTest, ::testing::Values( make_int2(0,2) ));
INSTANTIATE_TEST_CASE_P(xpy_half, BlasTest, ::testing::Values( make_int2(0,3) ));
INSTANTIATE_TEST_CASE_P(axpy_half, BlasTest, ::testing::Values( make_int2(0,4) ));
INSTANTIATE_TEST_CASE_P(xpay_half, BlasTest, ::testing::Values( make_int2(0,5) ));
INSTANTIATE_TEST_CASE_P(mxpy_half, BlasTest, ::testing::Values( make_int2(0,6) ));
INSTANTIATE_TEST_CASE_P(ax_half, BlasTest, ::testing::Values( make_int2(0,7) ));
INSTANTIATE_TEST_CASE_P(caxpy_half, BlasTest, ::testing::Values( make_int2(0,8) ));
INSTANTIATE_TEST_CASE_P(caxpby_half, BlasTest, ::testing::Values( make_int2(0,9) ));
INSTANTIATE_TEST_CASE_P(cxpaypbz_half, BlasTest, ::testing::Values( make_int2(0,10) ));
INSTANTIATE_TEST_CASE_P(axpyBzpcx_half, BlasTest, ::testing::Values( make_int2(0,11) ));
INSTANTIATE_TEST_CASE_P(axpyZpbx_half, BlasTest, ::testing::Values( make_int2(0,12) ));
INSTANTIATE_TEST_CASE_P(caxpbypzYmbw_half, BlasTest, ::testing::Values( make_int2(0,13) ));
INSTANTIATE_TEST_CASE_P(cabxpyAx_half, BlasTest, ::testing::Values( make_int2(0,14) ));
INSTANTIATE_TEST_CASE_P(caxpbypz_half, BlasTest, ::testing::Values( make_int2(0,15) ));
INSTANTIATE_TEST_CASE_P(caxpbypczpw_half, BlasTest, ::testing::Values( make_int2(0,16) ));
INSTANTIATE_TEST_CASE_P(caxpyXmaz_half, BlasTest, ::testing::Values( make_int2(0,17) ));
INSTANTIATE_TEST_CASE_P(norm2_half, BlasTest, ::testing::Values( make_int2(0,18) ));
INSTANTIATE_TEST_CASE_P(reDotProduct_half, BlasTest, ::testing::Values( make_int2(0,19) ));
INSTANTIATE_TEST_CASE_P(axpyNorm_half, BlasTest, ::testing::Values( make_int2(0,20) ));
INSTANTIATE_TEST_CASE_P(xmyNorm_half, BlasTest, ::testing::Values( make_int2(0,21) ));
INSTANTIATE_TEST_CASE_P(caxpyNorm_half, BlasTest, ::testing::Values( make_int2(0,22) ));
INSTANTIATE_TEST_CASE_P(caxpyXmazNormX_half, BlasTest, ::testing::Values( make_int2(0,23) ));
INSTANTIATE_TEST_CASE_P(cabxpyAxNorm_half, BlasTest, ::testing::Values( make_int2(0,24) ));
INSTANTIATE_TEST_CASE_P(cDotProduct_half, BlasTest, ::testing::Values( make_int2(0,25) ));
INSTANTIATE_TEST_CASE_P(xpaycDotzy_half, BlasTest, ::testing::Values( make_int2(0,26) ));
INSTANTIATE_TEST_CASE_P(caxpyDotzy_half, BlasTest, ::testing::Values( make_int2(0,27) ));
INSTANTIATE_TEST_CASE_P(cDotProductNormA_half, BlasTest, ::testing::Values( make_int2(0,28) ));
INSTANTIATE_TEST_CASE_P(cDotProductNormB_half, BlasTest, ::testing::Values( make_int2(0,29) ));
INSTANTIATE_TEST_CASE_P(caxpbypzYmbwcDotProductWYNormY_half, BlasTest, ::testing::Values( make_int2(0,30) ));
INSTANTIATE_TEST_CASE_P(HeavyQuarkResidualNorm_half, BlasTest, ::testing::Values( make_int2(0,31) ));
INSTANTIATE_TEST_CASE_P(xpyHeavyQuarkResidualNorm_half, BlasTest, ::testing::Values( make_int2(0,32) ));
INSTANTIATE_TEST_CASE_P(TripleCGReduction_half, BlasTest, ::testing::Values( make_int2(0,33) ));
INSTANTIATE_TEST_CASE_P(TripleCGUpdate_half, BlasTest, ::testing::Values( make_int2(0,34) ));
INSTANTIATE_TEST_CASE_P(multicaxpy_half, BlasTest, ::testing::Values( make_int2(0,35) ));
INSTANTIATE_TEST_CASE_P(multiaxpyBzpcx_half, BlasTest, ::testing::Values( make_int2(0,36) ));
// single precision
INSTANTIATE_TEST_CASE_P(copyHS_single, BlasTest, ::testing::Values( make_int2(1,0) ));
INSTANTIATE_TEST_CASE_P(copyLS_single, BlasTest, ::testing::Values( make_int2(1,1) ));
INSTANTIATE_TEST_CASE_P(axpby_single, BlasTest, ::testing::Values( make_int2(1,2) ));
INSTANTIATE_TEST_CASE_P(xpy_single, BlasTest, ::testing::Values( make_int2(1,3) ));
INSTANTIATE_TEST_CASE_P(axpy_single, BlasTest, ::testing::Values( make_int2(1,4) ));
INSTANTIATE_TEST_CASE_P(xpay_single, BlasTest, ::testing::Values( make_int2(1,5) ));
INSTANTIATE_TEST_CASE_P(mxpy_single, BlasTest, ::testing::Values( make_int2(1,6) ));
INSTANTIATE_TEST_CASE_P(ax_single, BlasTest, ::testing::Values( make_int2(1,7) ));
INSTANTIATE_TEST_CASE_P(caxpy_single, BlasTest, ::testing::Values( make_int2(1,8) ));
INSTANTIATE_TEST_CASE_P(caxpby_single, BlasTest, ::testing::Values( make_int2(1,9) ));
INSTANTIATE_TEST_CASE_P(cxpaypbz_single, BlasTest, ::testing::Values( make_int2(1,10) ));
INSTANTIATE_TEST_CASE_P(axpyBzpcx_single, BlasTest, ::testing::Values( make_int2(1,11) ));
INSTANTIATE_TEST_CASE_P(axpyZpbx_single, BlasTest, ::testing::Values( make_int2(1,12) ));
INSTANTIATE_TEST_CASE_P(caxpbypzYmbw_single, BlasTest, ::testing::Values( make_int2(1,13) ));
INSTANTIATE_TEST_CASE_P(cabxpyAx_single, BlasTest, ::testing::Values( make_int2(1,14) ));
INSTANTIATE_TEST_CASE_P(caxpbypz_single, BlasTest, ::testing::Values( make_int2(1,15) ));
INSTANTIATE_TEST_CASE_P(caxpbypczpw_single, BlasTest, ::testing::Values( make_int2(1,16) ));
INSTANTIATE_TEST_CASE_P(caxpyXmaz_single, BlasTest, ::testing::Values( make_int2(1,17) ));
INSTANTIATE_TEST_CASE_P(norm2_single, BlasTest, ::testing::Values( make_int2(1,18) ));
INSTANTIATE_TEST_CASE_P(reDotProduct_single, BlasTest, ::testing::Values( make_int2(1,19) ));
INSTANTIATE_TEST_CASE_P(axpyNorm_single, BlasTest, ::testing::Values( make_int2(1,20) ));
INSTANTIATE_TEST_CASE_P(xmyNorm_single, BlasTest, ::testing::Values( make_int2(1,21) ));
INSTANTIATE_TEST_CASE_P(caxpyNorm_single, BlasTest, ::testing::Values( make_int2(1,22) ));
INSTANTIATE_TEST_CASE_P(caxpyXmazNormX_single, BlasTest, ::testing::Values( make_int2(1,23) ));
INSTANTIATE_TEST_CASE_P(cabxpyAxNorm_single, BlasTest, ::testing::Values( make_int2(1,24) ));
INSTANTIATE_TEST_CASE_P(cDotProduct_single, BlasTest, ::testing::Values( make_int2(1,25) ));
INSTANTIATE_TEST_CASE_P(xpaycDotzy_single, BlasTest, ::testing::Values( make_int2(1,26) ));
INSTANTIATE_TEST_CASE_P(caxpyDotzy_single, BlasTest, ::testing::Values( make_int2(1,27) ));
INSTANTIATE_TEST_CASE_P(cDotProductNormA_single, BlasTest, ::testing::Values( make_int2(1,28) ));
INSTANTIATE_TEST_CASE_P(cDotProductNormB_single, BlasTest, ::testing::Values( make_int2(1,29) ));
INSTANTIATE_TEST_CASE_P(caxpbypzYmbwcDotProductWYNormY_single, BlasTest, ::testing::Values( make_int2(1,30) ));
INSTANTIATE_TEST_CASE_P(HeavyQuarkResidualNorm_single, BlasTest, ::testing::Values( make_int2(1,31) ));
INSTANTIATE_TEST_CASE_P(xpyHeavyQuarkResidualNorm_single, BlasTest, ::testing::Values( make_int2(1,32) ));
INSTANTIATE_TEST_CASE_P(TripleCGReduction_single, BlasTest, ::testing::Values( make_int2(1,33) ));
INSTANTIATE_TEST_CASE_P(TripleCGUpdate_single, BlasTest, ::testing::Values( make_int2(1,34) ));
INSTANTIATE_TEST_CASE_P(multicaxpy_single, BlasTest, ::testing::Values( make_int2(1,35) ));
INSTANTIATE_TEST_CASE_P(multiaxpyBzpcx_single, BlasTest, ::testing::Values( make_int2(1,36) ));
// double precision
INSTANTIATE_TEST_CASE_P(copyHS_double, BlasTest, ::testing::Values( make_int2(2,0) ));
INSTANTIATE_TEST_CASE_P(copyLS_double, BlasTest, ::testing::Values( make_int2(2,1) ));
INSTANTIATE_TEST_CASE_P(axpby_double, BlasTest, ::testing::Values( make_int2(2,2) ));
INSTANTIATE_TEST_CASE_P(xpy_double, BlasTest, ::testing::Values( make_int2(2,3) ));
INSTANTIATE_TEST_CASE_P(axpy_double, BlasTest, ::testing::Values( make_int2(2,4) ));
INSTANTIATE_TEST_CASE_P(xpay_double, BlasTest, ::testing::Values( make_int2(2,5) ));
INSTANTIATE_TEST_CASE_P(mxpy_double, BlasTest, ::testing::Values( make_int2(2,6) ));
INSTANTIATE_TEST_CASE_P(ax_double, BlasTest, ::testing::Values( make_int2(2,7) ));
INSTANTIATE_TEST_CASE_P(caxpy_double, BlasTest, ::testing::Values( make_int2(2,8) ));
INSTANTIATE_TEST_CASE_P(caxpby_double, BlasTest, ::testing::Values( make_int2(2,9) ));
INSTANTIATE_TEST_CASE_P(cxpaypbz_double, BlasTest, ::testing::Values( make_int2(2,10) ));
INSTANTIATE_TEST_CASE_P(axpyBzpcx_double, BlasTest, ::testing::Values( make_int2(2,11) ));
INSTANTIATE_TEST_CASE_P(axpyZpbx_double, BlasTest, ::testing::Values( make_int2(2,12) ));
INSTANTIATE_TEST_CASE_P(caxpbypzYmbw_double, BlasTest, ::testing::Values( make_int2(2,13) ));
INSTANTIATE_TEST_CASE_P(cabxpyAx_double, BlasTest, ::testing::Values( make_int2(2,14) ));
INSTANTIATE_TEST_CASE_P(caxpbypz_double, BlasTest, ::testing::Values( make_int2(2,15) ));
INSTANTIATE_TEST_CASE_P(caxpbypczpw_double, BlasTest, ::testing::Values( make_int2(2,16) ));
INSTANTIATE_TEST_CASE_P(caxpyXmaz_double, BlasTest, ::testing::Values( make_int2(2,17) ));
INSTANTIATE_TEST_CASE_P(norm2_double, BlasTest, ::testing::Values( make_int2(2,18) ));
INSTANTIATE_TEST_CASE_P(reDotProduct_double, BlasTest, ::testing::Values( make_int2(2,19) ));
INSTANTIATE_TEST_CASE_P(axpyNorm_double, BlasTest, ::testing::Values( make_int2(2,20) ));
INSTANTIATE_TEST_CASE_P(xmyNorm_double, BlasTest, ::testing::Values( make_int2(2,21) ));
INSTANTIATE_TEST_CASE_P(caxpyNorm_double, BlasTest, ::testing::Values( make_int2(2,22) ));
INSTANTIATE_TEST_CASE_P(caxpyXmazNormX_double, BlasTest, ::testing::Values( make_int2(2,23) ));
INSTANTIATE_TEST_CASE_P(cabxpyAxNorm_double, BlasTest, ::testing::Values( make_int2(2,24) ));
INSTANTIATE_TEST_CASE_P(cDotProduct_double, BlasTest, ::testing::Values( make_int2(2,25) ));
INSTANTIATE_TEST_CASE_P(xpaycDotzy_double, BlasTest, ::testing::Values( make_int2(2,26) ));
INSTANTIATE_TEST_CASE_P(caxpyDotzy_double, BlasTest, ::testing::Values( make_int2(2,27) ));
INSTANTIATE_TEST_CASE_P(cDotProductNormA_double, BlasTest, ::testing::Values( make_int2(2,28) ));
INSTANTIATE_TEST_CASE_P(cDotProductNormB_double, BlasTest, ::testing::Values( make_int2(2,29) ));
INSTANTIATE_TEST_CASE_P(caxpbypzYmbwcDotProductWYNormY_double, BlasTest, ::testing::Values( make_int2(2,30) ));
INSTANTIATE_TEST_CASE_P(HeavyQuarkResidualNorm_double, BlasTest, ::testing::Values( make_int2(2,31) ));
INSTANTIATE_TEST_CASE_P(xpyHeavyQuarkResidualNorm_double, BlasTest, ::testing::Values( make_int2(2,32) ));
INSTANTIATE_TEST_CASE_P(TripleCGReduction_double, BlasTest, ::testing::Values( make_int2(2,33) ));
INSTANTIATE_TEST_CASE_P(TripleCGUpdate_double, BlasTest, ::testing::Values( make_int2(2,34) ));
INSTANTIATE_TEST_CASE_P(multicaxpy_double, BlasTest, ::testing::Values( make_int2(2,35) ));
INSTANTIATE_TEST_CASE_P(multiaxpyBzpcx_double, BlasTest, ::testing::Values( make_int2(2,36) ));
| e43a0268cadfb6f08e50dbccd2f64ea0205de1c8.cu | #include <stdio.h>
#include <stdlib.h>
#include <quda_internal.h>
#include <color_spinor_field.h>
#include <blas_quda.h>
#include <test_util.h>
#include <face_quda.h>
// include because of nasty globals used in the tests
#include <dslash_util.h>
// google test
#include <gtest.h>
extern QudaDslashType dslash_type;
extern QudaInverterType inv_type;
extern int nvec;
extern bool tune;
extern int device;
extern int xdim;
extern int ydim;
extern int zdim;
extern int tdim;
extern int gridsize_from_cmdline[];
extern int niter;
extern bool tune;
extern bool verify_results;
extern int Nsrc;
extern int Msrc;
extern void usage(char** );
const int Nkernels = 37;
using namespace quda;
ColorSpinorField *xH, *yH, *zH, *wH, *vH, *hH, *lH;
ColorSpinorField *xD, *yD, *zD, *wD, *vD, *hD, *lD, *xmD, *ymD, *zmD;
std::vector<cpuColorSpinorField*> xmH;
std::vector<cpuColorSpinorField*> ymH;
std::vector<cpuColorSpinorField*> zmH;
int Nspin;
int Ncolor;
void setPrec(ColorSpinorParam ¶m, const QudaPrecision precision)
{
param.precision = precision;
if (Nspin == 1 || Nspin == 2 || precision == QUDA_DOUBLE_PRECISION) {
param.fieldOrder = QUDA_FLOAT2_FIELD_ORDER;
} else {
param.fieldOrder = QUDA_FLOAT4_FIELD_ORDER;
}
}
void
display_test_info()
{
printfQuda("running the following test:\n");
printfQuda("S_dimension T_dimension Nspin Ncolor\n");
printfQuda("%3d /%3d / %3d %3d %d %d\n", xdim, ydim, zdim, tdim, Nspin, Ncolor);
printfQuda("Grid partition info: X Y Z T\n");
printfQuda(" %d %d %d %d\n",
dimPartitioned(0),
dimPartitioned(1),
dimPartitioned(2),
dimPartitioned(3));
return;
}
int Nprec = 3;
bool skip_kernel(int precision, int kernel) {
if ( Nspin == 2 && precision == 0) {
// avoid half precision tests if doing coarse fields
return true;
} else if (Nspin == 2 && kernel == 1) {
// avoid low-precision copy if doing coarse fields
return true;
} else if (Ncolor != 3 && (kernel == 31 || kernel == 32)) {
// only benchmark heavy-quark norm if doing 3 colors
return true;
} else if ((Nprec < 3) && (kernel == 0)) {
// only benchmark high-precision copy() if double is supported
return true;
}
return false;
}
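// Concretely: with coarse (Nspin == 2) fields, all half-precision entries and
// the low-precision copy (kernel 1) are skipped; kernels 31/32 (the
// heavy-quark norms) only run for Ncolor == 3; and kernel 0 is skipped when
// double precision is unavailable (Nprec < 3).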
void initFields(int prec)
{
// precisions used for the source field in the copyCuda() benchmark
QudaPrecision high_aux_prec = QUDA_INVALID_PRECISION;
QudaPrecision low_aux_prec = QUDA_INVALID_PRECISION;
ColorSpinorParam param;
param.nColor = Ncolor;
param.nSpin = Nspin;
param.nDim = 4; // number of spacetime dimensions
param.pad = 0; // padding must be zero for cpu fields
param.siteSubset = QUDA_PARITY_SITE_SUBSET;
if (param.siteSubset == QUDA_PARITY_SITE_SUBSET) param.x[0] = xdim/2;
else param.x[0] = xdim;
param.x[1] = ydim;
param.x[2] = zdim;
param.x[3] = tdim;
param.siteOrder = QUDA_EVEN_ODD_SITE_ORDER;
param.gammaBasis = QUDA_DEGRAND_ROSSI_GAMMA_BASIS;
param.precision = QUDA_DOUBLE_PRECISION;
param.fieldOrder = QUDA_SPACE_SPIN_COLOR_FIELD_ORDER;
param.create = QUDA_ZERO_FIELD_CREATE;
vH = new cpuColorSpinorField(param);
wH = new cpuColorSpinorField(param);
xH = new cpuColorSpinorField(param);
yH = new cpuColorSpinorField(param);
zH = new cpuColorSpinorField(param);
hH = new cpuColorSpinorField(param);
lH = new cpuColorSpinorField(param);
// create composite fields
// xmH = new cpuColorSpinorField(param);
// ymH = new cpuColorSpinorField(param);
xmH.reserve(Nsrc);
for (int cid = 0; cid < Nsrc; cid++) xmH.push_back(new cpuColorSpinorField(param));
ymH.reserve(Msrc);
for (int cid = 0; cid < Msrc; cid++) ymH.push_back(new cpuColorSpinorField(param));
zmH.reserve(Nsrc);
for (int cid = 0; cid < Nsrc; cid++) zmH.push_back(new cpuColorSpinorField(param));
static_cast<cpuColorSpinorField*>(vH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
static_cast<cpuColorSpinorField*>(wH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
static_cast<cpuColorSpinorField*>(xH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
static_cast<cpuColorSpinorField*>(yH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
static_cast<cpuColorSpinorField*>(zH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
static_cast<cpuColorSpinorField*>(hH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
static_cast<cpuColorSpinorField*>(lH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
for(int i=0; i<Nsrc; i++){
static_cast<cpuColorSpinorField*>(xmH[i])->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
}
for(int i=0; i<Msrc; i++){
static_cast<cpuColorSpinorField*>(ymH[i])->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
}
// Now set the parameters for the cuda fields
//param.pad = xdim*ydim*zdim/2;
if (param.nSpin == 4) param.gammaBasis = QUDA_UKQCD_GAMMA_BASIS;
param.create = QUDA_ZERO_FIELD_CREATE;
switch(prec) {
case 0:
setPrec(param, QUDA_HALF_PRECISION);
high_aux_prec = QUDA_DOUBLE_PRECISION;
low_aux_prec = QUDA_SINGLE_PRECISION;
break;
case 1:
setPrec(param, QUDA_SINGLE_PRECISION);
high_aux_prec = QUDA_DOUBLE_PRECISION;
low_aux_prec = QUDA_HALF_PRECISION;
break;
case 2:
setPrec(param, QUDA_DOUBLE_PRECISION);
high_aux_prec = QUDA_SINGLE_PRECISION;
low_aux_prec = QUDA_HALF_PRECISION;
break;
default:
errorQuda("Precision option not defined");
}
checkCudaError();
vD = new cudaColorSpinorField(param);
wD = new cudaColorSpinorField(param);
xD = new cudaColorSpinorField(param);
yD = new cudaColorSpinorField(param);
zD = new cudaColorSpinorField(param);
param.is_composite = true;
param.is_component = false;
// create composite fields
param.composite_dim = Nsrc;
xmD = new cudaColorSpinorField(param);
param.composite_dim = Msrc;
ymD = new cudaColorSpinorField(param);
param.composite_dim = Nsrc;
zmD = new cudaColorSpinorField(param);
param.is_composite = false;
param.is_component = false;
param.composite_dim = 1;
setPrec(param, high_aux_prec);
hD = new cudaColorSpinorField(param);
setPrec(param, low_aux_prec);
lD = new cudaColorSpinorField(param);
// check for successful allocation
checkCudaError();
// only do copy if not doing half precision with mg
bool flag = !(param.nSpin == 2 &&
(prec == 0 || low_aux_prec == QUDA_HALF_PRECISION) );
if ( flag ) {
*vD = *vH;
*wD = *wH;
*xD = *xH;
*yD = *yH;
*zD = *zH;
*hD = *hH;
*lD = *lH;
// for (int i=0; i < Nsrc; i++){
// xmD->Component(i) = *(xmH[i]);
// ymD->Component(i) = *(ymH[i]);
// }
// *ymD = *ymH;
}
}
void freeFields()
{
// release memory
delete vD;
delete wD;
delete xD;
delete yD;
delete zD;
delete hD;
delete lD;
delete xmD;
delete ymD;
delete zmD;
// release memory
delete vH;
delete wH;
delete xH;
delete yH;
delete zH;
delete hH;
delete lH;
for (int i=0; i < Nsrc; i++) delete xmH[i];
for (int i=0; i < Msrc; i++) delete ymH[i];
for (int i=0; i < Nsrc; i++) delete zmH[i];
xmH.clear();
ymH.clear();
zmH.clear();
}
double benchmark(int kernel, const int niter) {
double a, b, c;
quda::Complex a2, b2, c2;
quda::Complex * A = new quda::Complex[Nsrc*Msrc];
quda::Complex * B = new quda::Complex[Nsrc*Msrc];
quda::Complex * C = new quda::Complex[Nsrc*Msrc];
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
{
switch (kernel) {
case 0:
for (int i=0; i < niter; ++i) blas::copy(*yD, *hD);
break;
case 1:
for (int i=0; i < niter; ++i) blas::copy(*yD, *lD);
break;
case 2:
for (int i=0; i < niter; ++i) blas::axpby(a, *xD, b, *yD);
break;
case 3:
for (int i=0; i < niter; ++i) blas::xpy(*xD, *yD);
break;
case 4:
for (int i=0; i < niter; ++i) blas::axpy(a, *xD, *yD);
break;
case 5:
for (int i=0; i < niter; ++i) blas::xpay(*xD, a, *yD);
break;
case 6:
for (int i=0; i < niter; ++i) blas::mxpy(*xD, *yD);
break;
case 7:
for (int i=0; i < niter; ++i) blas::ax(a, *xD);
break;
case 8:
for (int i=0; i < niter; ++i) blas::caxpy(a2, *xD, *yD);
break;
case 9:
for (int i=0; i < niter; ++i) blas::caxpby(a2, *xD, b2, *yD);
break;
case 10:
for (int i=0; i < niter; ++i) blas::cxpaypbz(*xD, a2, *yD, b2, *zD);
break;
case 11:
for (int i=0; i < niter; ++i) blas::axpyBzpcx(a, *xD, *yD, b, *zD, c);
break;
case 12:
for (int i=0; i < niter; ++i) blas::axpyZpbx(a, *xD, *yD, *zD, b);
break;
case 13:
for (int i=0; i < niter; ++i) blas::caxpbypzYmbw(a2, *xD, b2, *yD, *zD, *wD);
break;
case 14:
for (int i=0; i < niter; ++i) blas::cabxpyAx(a, b2, *xD, *yD);
break;
case 15:
for (int i=0; i < niter; ++i) blas::caxpbypz(a2, *xD, b2, *yD, *zD);
break;
case 16:
for (int i=0; i < niter; ++i) blas::caxpbypczpw(a2, *xD, b2, *yD, c2, *zD, *wD);
break;
case 17:
for (int i=0; i < niter; ++i) blas::caxpyXmaz(a2, *xD, *yD, *zD);
break;
// double
case 18:
for (int i=0; i < niter; ++i) blas::norm2(*xD);
break;
case 19:
for (int i=0; i < niter; ++i) blas::reDotProduct(*xD, *yD);
break;
case 20:
for (int i=0; i < niter; ++i) blas::axpyNorm(a, *xD, *yD);
break;
case 21:
for (int i=0; i < niter; ++i) blas::xmyNorm(*xD, *yD);
break;
case 22:
for (int i=0; i < niter; ++i) blas::caxpyNorm(a2, *xD, *yD);
break;
case 23:
for (int i=0; i < niter; ++i) blas::caxpyXmazNormX(a2, *xD, *yD, *zD);
break;
case 24:
for (int i=0; i < niter; ++i) blas::cabxpyAxNorm(a, b2, *xD, *yD);
break;
// double2
case 25:
for (int i=0; i < niter; ++i) blas::cDotProduct(*xD, *yD);
break;
case 26:
for (int i=0; i < niter; ++i) blas::xpaycDotzy(*xD, a, *yD, *zD);
break;
case 27:
for (int i=0; i < niter; ++i) blas::caxpyDotzy(a2, *xD, *yD, *zD);
break;
// double3
case 28:
for (int i=0; i < niter; ++i) blas::cDotProductNormA(*xD, *yD);
break;
case 29:
for (int i=0; i < niter; ++i) blas::cDotProductNormB(*xD, *yD);
break;
case 30:
for (int i=0; i < niter; ++i) blas::caxpbypzYmbwcDotProductUYNormY(a2, *xD, b2, *yD, *zD, *wD, *vD);
break;
case 31:
for (int i=0; i < niter; ++i) blas::HeavyQuarkResidualNorm(*xD, *yD);
break;
case 32:
for (int i=0; i < niter; ++i) blas::xpyHeavyQuarkResidualNorm(*xD, *yD, *zD);
break;
case 33:
for (int i=0; i < niter; ++i) blas::tripleCGReduction(*xD, *yD, *zD);
break;
case 34:
for (int i=0; i < niter; ++i) blas::tripleCGUpdate(a, b, *xD, *yD, *zD, *wD);
break;
case 35:
for (int i=0; i < niter; ++i) blas::caxpy(A, *xmD,* ymD);
break;
case 36:
for (int i=0; i < niter; ++i) blas::axpyBzpcx((double*)A, xmD->Components(), zmD->Components(), (double*)B, *yD, (double*)C);
break;
default:
errorQuda("Undefined blas kernel %d\n", kernel);
}
}
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float runTime;
cudaEventElapsedTime(&runTime, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
delete[] A;
delete[] B;
delete[] C;
double secs = runTime / 1000;
return secs;
}
#define ERROR(a) fabs(blas::norm2(*a##D) - blas::norm2(*a##H)) / blas::norm2(*a##H)
double test(int kernel) {
double a = M_PI, b = M_PI*exp(1.0), c = sqrt(M_PI);
quda::Complex a2(a, b), b2(b, -c), c2(a+b, c*a);
double error = 0;
quda::Complex * A = new quda::Complex[Nsrc*Msrc];
quda::Complex * B = new quda::Complex[Nsrc*Msrc];
quda::Complex * C = new quda::Complex[Nsrc*Msrc];
for(int i=0; i < Nsrc*Msrc; i++){
A[i] = a2* (1.0*((i/Nsrc) + i)) + b2 * (1.0*i) + c2 *(1.0*(Nsrc*Msrc/2-i));
B[i] = a2* (1.0*((i/Nsrc) + i)) - b2 * (M_PI*i) + c2 *(1.0*(Nsrc*Msrc/2-i));
C[i] = a2* (1.0*((M_PI/Nsrc) + i)) + b2 * (1.0*i) + c2 *(1.0*(Nsrc*Msrc/2-i));
}
// A[0] = a2;
// A[1] = 0.;
// A[2] = 0.;
// A[3] = 0.;
switch (kernel) {
case 0:
*hD = *hH;
blas::copy(*yD, *hD);
blas::copy(*yH, *hH);
error = ERROR(y);
break;
case 1:
*lD = *lH;
blas::copy(*yD, *lD);
blas::copy(*yH, *lH);
error = ERROR(y);
break;
case 2:
*xD = *xH;
*yD = *yH;
blas::axpby(a, *xD, b, *yD);
blas::axpby(a, *xH, b, *yH);
error = ERROR(y);
break;
case 3:
*xD = *xH;
*yD = *yH;
blas::xpy(*xD, *yD);
blas::xpy(*xH, *yH);
error = ERROR(y);
break;
case 4:
*xD = *xH;
*yD = *yH;
blas::axpy(a, *xD, *yD);
blas::axpy(a, *xH, *yH);
*zH = *yD;
error = ERROR(y);
break;
case 5:
*xD = *xH;
*yD = *yH;
blas::xpay(*xD, a, *yD);
blas::xpay(*xH, a, *yH);
error = ERROR(y);
break;
case 6:
*xD = *xH;
*yD = *yH;
blas::mxpy(*xD, *yD);
blas::mxpy(*xH, *yH);
error = ERROR(y);
break;
case 7:
*xD = *xH;
blas::ax(a, *xD);
blas::ax(a, *xH);
error = ERROR(x);
break;
case 8:
*xD = *xH;
*yD = *yH;
blas::caxpy(a2, *xD, *yD);
blas::caxpy(a2, *xH, *yH);
error = ERROR(y);
break;
case 9:
*xD = *xH;
*yD = *yH;
blas::caxpby(a2, *xD, b2, *yD);
blas::caxpby(a2, *xH, b2, *yH);
error = ERROR(y);
break;
case 10:
*xD = *xH;
*yD = *yH;
*zD = *zH;
blas::cxpaypbz(*xD, a2, *yD, b2, *zD);
blas::cxpaypbz(*xH, a2, *yH, b2, *zH);
error = ERROR(z);
break;
case 11:
*xD = *xH;
*yD = *yH;
*zD = *zH;
blas::axpyBzpcx(a, *xD, *yD, b, *zD, c);
blas::axpyBzpcx(a, *xH, *yH, b, *zH, c);
error = ERROR(x) + ERROR(y);
break;
case 12:
*xD = *xH;
*yD = *yH;
*zD = *zH;
blas::axpyZpbx(a, *xD, *yD, *zD, b);
blas::axpyZpbx(a, *xH, *yH, *zH, b);
error = ERROR(x) + ERROR(y);
break;
case 13:
*xD = *xH;
*yD = *yH;
*zD = *zH;
*wD = *wH;
blas::caxpbypzYmbw(a2, *xD, b2, *yD, *zD, *wD);
blas::caxpbypzYmbw(a2, *xH, b2, *yH, *zH, *wH);
error = ERROR(z) + ERROR(y);
break;
case 14:
*xD = *xH;
*yD = *yH;
blas::cabxpyAx(a, b2, *xD, *yD);
blas::cabxpyAx(a, b2, *xH, *yH);
error = ERROR(y) + ERROR(x);
break;
case 15:
*xD = *xH;
*yD = *yH;
*zD = *zH;
{blas::caxpbypz(a2, *xD, b2, *yD, *zD);
blas::caxpbypz(a2, *xH, b2, *yH, *zH);
error = ERROR(z); }
break;
case 16:
*xD = *xH;
*yD = *yH;
*zD = *zH;
*wD = *wH;
{blas::caxpbypczpw(a2, *xD, b2, *yD, c2, *zD, *wD);
blas::caxpbypczpw(a2, *xH, b2, *yH, c2, *zH, *wH);
error = ERROR(w); }
break;
case 17:
*xD = *xH;
*yD = *yH;
*zD = *zH;
{blas::caxpyXmaz(a, *xD, *yD, *zD);
blas::caxpyXmaz(a, *xH, *yH, *zH);
error = ERROR(y) + ERROR(x);}
break;
// double
case 18:
*xD = *xH;
*yH = *xD;
error = fabs(blas::norm2(*xD) - blas::norm2(*xH)) / blas::norm2(*xH);
break;
case 19:
*xD = *xH;
*yD = *yH;
error = fabs(blas::reDotProduct(*xD, *yD) - blas::reDotProduct(*xH, *yH)) / fabs(blas::reDotProduct(*xH, *yH));
break;
case 20:
*xD = *xH;
*yD = *yH;
{double d = blas::axpyNorm(a, *xD, *yD);
double h = blas::axpyNorm(a, *xH, *yH);
error = ERROR(y) + fabs(d-h)/fabs(h);}
break;
case 21:
*xD = *xH;
*yD = *yH;
{double d = blas::xmyNorm(*xD, *yD);
double h = blas::xmyNorm(*xH, *yH);
error = ERROR(y) + fabs(d-h)/fabs(h);}
break;
case 22:
*xD = *xH;
*yD = *yH;
{double d = blas::caxpyNorm(a, *xD, *yD);
double h = blas::caxpyNorm(a, *xH, *yH);
error = ERROR(y) + fabs(d-h)/fabs(h);}
break;
case 23:
*xD = *xH;
*yD = *yH;
*zD = *zH;
{double d = blas::caxpyXmazNormX(a, *xD, *yD, *zD);
double h = blas::caxpyXmazNormX(a, *xH, *yH, *zH);
error = ERROR(y) + ERROR(x) + fabs(d-h)/fabs(h);}
break;
case 24:
*xD = *xH;
*yD = *yH;
{double d = blas::cabxpyAxNorm(a, b2, *xD, *yD);
double h = blas::cabxpyAxNorm(a, b2, *xH, *yH);
error = ERROR(x) + ERROR(y) + fabs(d-h)/fabs(h);}
break;
// double2
case 25:
*xD = *xH;
*yD = *yH;
error = abs(blas::cDotProduct(*xD, *yD) - blas::cDotProduct(*xH, *yH)) / abs(blas::cDotProduct(*xH, *yH));
break;
case 26:
*xD = *xH;
*yD = *yH;
*zD = *zH;
{ quda::Complex d = blas::xpaycDotzy(*xD, a, *yD, *zD);
quda::Complex h = blas::xpaycDotzy(*xH, a, *yH, *zH);
error = fabs(blas::norm2(*yD) - blas::norm2(*yH)) / blas::norm2(*yH) + abs(d-h)/abs(h);
}
break;
case 27:
*xD = *xH;
*yD = *yH;
*zD = *zH;
{quda::Complex d = blas::caxpyDotzy(a, *xD, *yD, *zD);
quda::Complex h = blas::caxpyDotzy(a, *xH, *yH, *zH);
error = ERROR(y) + abs(d-h)/abs(h);}
break;
// double3
case 28:
*xD = *xH;
*yD = *yH;
{ double3 d = blas::cDotProductNormA(*xD, *yD);
double3 h = blas::cDotProductNormA(*xH, *yH);
error = fabs(d.x - h.x) / fabs(h.x) + fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
break;
case 29:
*xD = *xH;
*yD = *yH;
{ double3 d = blas::cDotProductNormB(*xD, *yD);
double3 h = blas::cDotProductNormB(*xH, *yH);
error = fabs(d.x - h.x) / fabs(h.x) + fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
break;
case 30:
*xD = *xH;
*yD = *yH;
*zD = *zH;
*wD = *wH;
*vD = *vH;
{ double3 d = blas::caxpbypzYmbwcDotProductUYNormY(a2, *xD, b2, *yD, *zD, *wD, *vD);
double3 h = blas::caxpbypzYmbwcDotProductUYNormY(a2, *xH, b2, *yH, *zH, *wH, *vH);
error = ERROR(z) + ERROR(y) + fabs(d.x - h.x) / fabs(h.x) +
fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
break;
case 31:
*xD = *xH;
*yD = *yH;
{ double3 d = blas::HeavyQuarkResidualNorm(*xD, *yD);
double3 h = blas::HeavyQuarkResidualNorm(*xH, *yH);
error = fabs(d.x - h.x) / fabs(h.x) +
fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
break;
case 32:
*xD = *xH;
*yD = *yH;
*zD = *zH;
{ double3 d = blas::xpyHeavyQuarkResidualNorm(*xD, *yD, *zD);
double3 h = blas::xpyHeavyQuarkResidualNorm(*xH, *yH, *zH);
error = ERROR(y) + fabs(d.x - h.x) / fabs(h.x) +
fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
break;
case 33:
*xD = *xH;
*yD = *yH;
*zD = *zH;
{ double3 d = blas::tripleCGReduction(*xD, *yD, *zD);
double3 h = make_double3(blas::norm2(*xH), blas::norm2(*yH), blas::reDotProduct(*yH, *zH));
error = fabs(d.x - h.x) / fabs(h.x) +
fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
break;
case 34:
*xD = *xH;
*yD = *yH;
*zD = *zH;
*wD = *wH;
{ blas::tripleCGUpdate(a, b, *xD, *yD, *zD, *wD);
blas::tripleCGUpdate(a, b, *xH, *yH, *zH, *wH);
error = ERROR(y) + ERROR(z) + ERROR(w); }
break;
case 35:
for (int i=0; i < Nsrc; i++) xmD->Component(i) = *(xmH[i]);
for (int i=0; i < Msrc; i++) ymD->Component(i) = *(ymH[i]);
blas::caxpy(A, *xmD, *ymD);
for (int i=0; i < Nsrc; i++){
for(int j=0; j < Msrc; j++){
blas::caxpy(A[Msrc*i+j], *(xmH[i]), *(ymH[j]));
}
}
error = 0;
for (int i=0; i < Msrc; i++){
error+= fabs(blas::norm2((ymD->Component(i))) - blas::norm2(*(ymH[i]))) / blas::norm2(*(ymH[i]));
}
error/= Msrc;
break;
case 36:
for (int i=0; i < Nsrc; i++) {
xmD->Component(i) = *(xmH[i]);
zmD->Component(i) = *(zmH[i]);
}
*yD = *yH;
blas::axpyBzpcx((double*)A, xmD->Components(), zmD->Components(), (double*)B, *yD, (const double*)C);
for (int i=0; i<Nsrc; i++) {
blas::axpyBzpcx(((double*)A)[i], *xmH[i], *zmH[i], ((double*)B)[i], *yH, ((double*)C)[i]);
}
error = 0;
for (int i=0; i < Nsrc; i++){
error+= fabs(blas::norm2((xmD->Component(i))) - blas::norm2(*(xmH[i]))) / blas::norm2(*(xmH[i]));
//error+= fabs(blas::norm2((zmD->Component(i))) - blas::norm2(*(zmH[i]))) / blas::norm2(*(zmH[i]));
}
error/= Nsrc;
break;
default:
errorQuda("Undefined blas kernel %d\n", kernel);
}
delete[] A;
delete[] B;
delete[] C;
return error;
}
const char *prec_str[] = {"half", "single", "double"};
const char *names[] = {
"copyHS",
"copyLS",
"axpby",
"xpy",
"axpy",
"xpay",
"mxpy",
"ax",
"caxpy",
"caxpby",
"cxpaypbz",
"axpyBzpcx",
"axpyZpbx",
"caxpbypzYmbw",
"cabxpyAx",
"caxpbypz",
"caxpbypczpw",
"caxpyXmaz",
"norm",
"reDotProduct",
"axpyNorm",
"xmyNorm",
"caxpyNorm",
"caxpyXmazNormX",
"cabxpyAxNorm",
"cDotProduct",
"xpaycDotzy",
"caxpyDotzy",
"cDotProductNormA",
"cDotProductNormB",
"caxpbypzYmbwcDotProductUYNormY",
"HeavyQuarkResidualNorm",
"xpyHeavyQuarkResidualNorm",
"tripleCGReduction",
"tripleCGUpdate",
"caxpy (block)",
"axpyBzpcx (block)",
};
int main(int argc, char** argv)
{
for (int i = 1; i < argc; i++){
if(process_command_line_option(argc, argv, &i) == 0){
continue;
}
printfQuda("ERROR: Invalid option:%s\n", argv[i]);
usage(argv);
}
// override spin setting if mg solver is set to test coarse grids
if (inv_type == QUDA_MG_INVERTER) {
Nspin = 2;
Ncolor = nvec;
} else {
// set spin according to the type of dslash
Nspin = (dslash_type == QUDA_ASQTAD_DSLASH ||
dslash_type == QUDA_STAGGERED_DSLASH) ? 1 : 4;
Ncolor = 3;
}
setSpinorSiteSize(24);
initComms(argc, argv, gridsize_from_cmdline);
display_test_info();
initQuda(device);
// enable the tuning
setTuning(tune ? QUDA_TUNE_YES : QUDA_TUNE_NO);
setVerbosity(QUDA_SILENT);
for (int prec = 0; prec < Nprec; prec++) {
if (Nspin == 2 && prec == 0) continue;
printfQuda("\nBenchmarking %s precision with %d iterations...\n\n", prec_str[prec], niter);
initFields(prec);
for (int kernel = 0; kernel < Nkernels; kernel++) {
if (skip_kernel(prec, kernel)) continue;
// do the initial tune
benchmark(kernel, 1);
// now rerun with more iterations to get accurate speed measurements
quda::blas::flops = 0;
quda::blas::bytes = 0;
double secs = benchmark(kernel, niter);
double gflops = (quda::blas::flops*1e-9)/(secs);
double gbytes = quda::blas::bytes/(secs*1e9);
printfQuda("%-31s: Gflop/s = %6.1f, GB/s = %6.1f\n", names[kernel], gflops, gbytes);
}
freeFields();
}
// clear the error state
cudaGetLastError();
// lastly check for correctness
if (verify_results) {
::testing::InitGoogleTest(&argc, argv);
if (RUN_ALL_TESTS() != 0) warningQuda("Tests failed");
}
endQuda();
finalizeComms();
}
// The following tests each kernel at each precision using the google testing framework
class BlasTest : public ::testing::TestWithParam<int2> {
protected:
int2 param;
public:
virtual ~BlasTest() { }
virtual void SetUp() {
param = GetParam();
initFields(param.x);
}
virtual void TearDown() { freeFields(); }
virtual void NormalExit() { printf("monkey\n"); }
};
TEST_P(BlasTest, verify) {
int prec = param.x;
int kernel = param.y;
// certain tests will fail to run for coarse grids so mark these as
// failed without running
double deviation = skip_kernel(prec,kernel) ? 1.0 : test(kernel);
printfQuda("%-35s error = %e\n", names[kernel], deviation);
double tol = (prec == 2 ? 1e-11 : (prec == 1 ? 1e-5 : 1e-3));
tol = (kernel < 2) ? 1e-4 : tol; // use different tolerance for copy
EXPECT_LE(deviation, tol) << "CPU and CUDA implementations do not agree";
}
// half precision
INSTANTIATE_TEST_CASE_P(copyHS_half, BlasTest, ::testing::Values( make_int2(0,0) ));
INSTANTIATE_TEST_CASE_P(copyLS_half, BlasTest, ::testing::Values( make_int2(0,1) ));
INSTANTIATE_TEST_CASE_P(axpby_half, BlasTest, ::testing::Values( make_int2(0,2) ));
INSTANTIATE_TEST_CASE_P(xpy_half, BlasTest, ::testing::Values( make_int2(0,3) ));
INSTANTIATE_TEST_CASE_P(axpy_half, BlasTest, ::testing::Values( make_int2(0,4) ));
INSTANTIATE_TEST_CASE_P(xpay_half, BlasTest, ::testing::Values( make_int2(0,5) ));
INSTANTIATE_TEST_CASE_P(mxpy_half, BlasTest, ::testing::Values( make_int2(0,6) ));
INSTANTIATE_TEST_CASE_P(ax_half, BlasTest, ::testing::Values( make_int2(0,7) ));
INSTANTIATE_TEST_CASE_P(caxpy_half, BlasTest, ::testing::Values( make_int2(0,8) ));
INSTANTIATE_TEST_CASE_P(caxpby_half, BlasTest, ::testing::Values( make_int2(0,9) ));
INSTANTIATE_TEST_CASE_P(cxpaypbz_half, BlasTest, ::testing::Values( make_int2(0,10) ));
INSTANTIATE_TEST_CASE_P(axpyBzpcx_half, BlasTest, ::testing::Values( make_int2(0,11) ));
INSTANTIATE_TEST_CASE_P(axpyZpbx_half, BlasTest, ::testing::Values( make_int2(0,12) ));
INSTANTIATE_TEST_CASE_P(caxpbypzYmbw_half, BlasTest, ::testing::Values( make_int2(0,13) ));
INSTANTIATE_TEST_CASE_P(cabxpyAx_half, BlasTest, ::testing::Values( make_int2(0,14) ));
INSTANTIATE_TEST_CASE_P(caxpbypz_half, BlasTest, ::testing::Values( make_int2(0,15) ));
INSTANTIATE_TEST_CASE_P(caxpbypczpw_half, BlasTest, ::testing::Values( make_int2(0,16) ));
INSTANTIATE_TEST_CASE_P(caxpyXmaz_half, BlasTest, ::testing::Values( make_int2(0,17) ));
INSTANTIATE_TEST_CASE_P(norm2_half, BlasTest, ::testing::Values( make_int2(0,18) ));
INSTANTIATE_TEST_CASE_P(reDotProduct_half, BlasTest, ::testing::Values( make_int2(0,19) ));
INSTANTIATE_TEST_CASE_P(axpyNorm_half, BlasTest, ::testing::Values( make_int2(0,20) ));
INSTANTIATE_TEST_CASE_P(xmyNorm_half, BlasTest, ::testing::Values( make_int2(0,21) ));
INSTANTIATE_TEST_CASE_P(caxpyNorm_half, BlasTest, ::testing::Values( make_int2(0,22) ));
INSTANTIATE_TEST_CASE_P(caxpyXmazNormX_half, BlasTest, ::testing::Values( make_int2(0,23) ));
INSTANTIATE_TEST_CASE_P(cabxpyAxNorm_half, BlasTest, ::testing::Values( make_int2(0,24) ));
INSTANTIATE_TEST_CASE_P(cDotProduct_half, BlasTest, ::testing::Values( make_int2(0,25) ));
INSTANTIATE_TEST_CASE_P(xpaycDotzy_half, BlasTest, ::testing::Values( make_int2(0,26) ));
INSTANTIATE_TEST_CASE_P(caxpyDotzy_half, BlasTest, ::testing::Values( make_int2(0,27) ));
INSTANTIATE_TEST_CASE_P(cDotProductNormA_half, BlasTest, ::testing::Values( make_int2(0,28) ));
INSTANTIATE_TEST_CASE_P(cDotProductNormB_half, BlasTest, ::testing::Values( make_int2(0,29) ));
INSTANTIATE_TEST_CASE_P(caxpbypzYmbwcDotProductWYNormY_half, BlasTest, ::testing::Values( make_int2(0,30) ));
INSTANTIATE_TEST_CASE_P(HeavyQuarkResidualNorm_half, BlasTest, ::testing::Values( make_int2(0,31) ));
INSTANTIATE_TEST_CASE_P(xpyHeavyQuarkResidualNorm_half, BlasTest, ::testing::Values( make_int2(0,32) ));
INSTANTIATE_TEST_CASE_P(TripleCGReduction_half, BlasTest, ::testing::Values( make_int2(0,33) ));
INSTANTIATE_TEST_CASE_P(TripleCGUpdate_half, BlasTest, ::testing::Values( make_int2(0,34) ));
INSTANTIATE_TEST_CASE_P(multicaxpy_half, BlasTest, ::testing::Values( make_int2(0,35) ));
INSTANTIATE_TEST_CASE_P(multiaxpyBzpcx_half, BlasTest, ::testing::Values( make_int2(0,36) ));
// single precision
INSTANTIATE_TEST_CASE_P(copyHS_single, BlasTest, ::testing::Values( make_int2(1,0) ));
INSTANTIATE_TEST_CASE_P(copyLS_single, BlasTest, ::testing::Values( make_int2(1,1) ));
INSTANTIATE_TEST_CASE_P(axpby_single, BlasTest, ::testing::Values( make_int2(1,2) ));
INSTANTIATE_TEST_CASE_P(xpy_single, BlasTest, ::testing::Values( make_int2(1,3) ));
INSTANTIATE_TEST_CASE_P(axpy_single, BlasTest, ::testing::Values( make_int2(1,4) ));
INSTANTIATE_TEST_CASE_P(xpay_single, BlasTest, ::testing::Values( make_int2(1,5) ));
INSTANTIATE_TEST_CASE_P(mxpy_single, BlasTest, ::testing::Values( make_int2(1,6) ));
INSTANTIATE_TEST_CASE_P(ax_single, BlasTest, ::testing::Values( make_int2(1,7) ));
INSTANTIATE_TEST_CASE_P(caxpy_single, BlasTest, ::testing::Values( make_int2(1,8) ));
INSTANTIATE_TEST_CASE_P(caxpby_single, BlasTest, ::testing::Values( make_int2(1,9) ));
INSTANTIATE_TEST_CASE_P(cxpaypbz_single, BlasTest, ::testing::Values( make_int2(1,10) ));
INSTANTIATE_TEST_CASE_P(axpyBzpcx_single, BlasTest, ::testing::Values( make_int2(1,11) ));
INSTANTIATE_TEST_CASE_P(axpyZpbx_single, BlasTest, ::testing::Values( make_int2(1,12) ));
INSTANTIATE_TEST_CASE_P(caxpbypzYmbw_single, BlasTest, ::testing::Values( make_int2(1,13) ));
INSTANTIATE_TEST_CASE_P(cabxpyAx_single, BlasTest, ::testing::Values( make_int2(1,14) ));
INSTANTIATE_TEST_CASE_P(caxpbypz_single, BlasTest, ::testing::Values( make_int2(1,15) ));
INSTANTIATE_TEST_CASE_P(caxpbypczpw_single, BlasTest, ::testing::Values( make_int2(1,16) ));
INSTANTIATE_TEST_CASE_P(caxpyXmaz_single, BlasTest, ::testing::Values( make_int2(1,17) ));
INSTANTIATE_TEST_CASE_P(norm2_single, BlasTest, ::testing::Values( make_int2(1,18) ));
INSTANTIATE_TEST_CASE_P(reDotProduct_single, BlasTest, ::testing::Values( make_int2(1,19) ));
INSTANTIATE_TEST_CASE_P(axpyNorm_single, BlasTest, ::testing::Values( make_int2(1,20) ));
INSTANTIATE_TEST_CASE_P(xmyNorm_single, BlasTest, ::testing::Values( make_int2(1,21) ));
INSTANTIATE_TEST_CASE_P(caxpyNorm_single, BlasTest, ::testing::Values( make_int2(1,22) ));
INSTANTIATE_TEST_CASE_P(caxpyXmazNormX_single, BlasTest, ::testing::Values( make_int2(1,23) ));
INSTANTIATE_TEST_CASE_P(cabxpyAxNorm_single, BlasTest, ::testing::Values( make_int2(1,24) ));
INSTANTIATE_TEST_CASE_P(cDotProduct_single, BlasTest, ::testing::Values( make_int2(1,25) ));
INSTANTIATE_TEST_CASE_P(xpaycDotzy_single, BlasTest, ::testing::Values( make_int2(1,26) ));
INSTANTIATE_TEST_CASE_P(caxpyDotzy_single, BlasTest, ::testing::Values( make_int2(1,27) ));
INSTANTIATE_TEST_CASE_P(cDotProductNormA_single, BlasTest, ::testing::Values( make_int2(1,28) ));
INSTANTIATE_TEST_CASE_P(cDotProductNormB_single, BlasTest, ::testing::Values( make_int2(1,29) ));
INSTANTIATE_TEST_CASE_P(caxpbypzYmbwcDotProductWYNormY_single, BlasTest, ::testing::Values( make_int2(1,30) ));
INSTANTIATE_TEST_CASE_P(HeavyQuarkResidualNorm_single, BlasTest, ::testing::Values( make_int2(1,31) ));
INSTANTIATE_TEST_CASE_P(xpyHeavyQuarkResidualNorm_single, BlasTest, ::testing::Values( make_int2(1,32) ));
INSTANTIATE_TEST_CASE_P(TripleCGReduction_single, BlasTest, ::testing::Values( make_int2(1,33) ));
INSTANTIATE_TEST_CASE_P(TripleCGUpdate_single, BlasTest, ::testing::Values( make_int2(1,34) ));
INSTANTIATE_TEST_CASE_P(multicaxpy_single, BlasTest, ::testing::Values( make_int2(1,35) ));
INSTANTIATE_TEST_CASE_P(multiaxpyBzpcx_single, BlasTest, ::testing::Values( make_int2(1,36) ));
// double precision
INSTANTIATE_TEST_CASE_P(copyHS_double, BlasTest, ::testing::Values( make_int2(2,0) ));
INSTANTIATE_TEST_CASE_P(copyLS_double, BlasTest, ::testing::Values( make_int2(2,1) ));
INSTANTIATE_TEST_CASE_P(axpby_double, BlasTest, ::testing::Values( make_int2(2,2) ));
INSTANTIATE_TEST_CASE_P(xpy_double, BlasTest, ::testing::Values( make_int2(2,3) ));
INSTANTIATE_TEST_CASE_P(axpy_double, BlasTest, ::testing::Values( make_int2(2,4) ));
INSTANTIATE_TEST_CASE_P(xpay_double, BlasTest, ::testing::Values( make_int2(2,5) ));
INSTANTIATE_TEST_CASE_P(mxpy_double, BlasTest, ::testing::Values( make_int2(2,6) ));
INSTANTIATE_TEST_CASE_P(ax_double, BlasTest, ::testing::Values( make_int2(2,7) ));
INSTANTIATE_TEST_CASE_P(caxpy_double, BlasTest, ::testing::Values( make_int2(2,8) ));
INSTANTIATE_TEST_CASE_P(caxpby_double, BlasTest, ::testing::Values( make_int2(2,9) ));
INSTANTIATE_TEST_CASE_P(cxpaypbz_double, BlasTest, ::testing::Values( make_int2(2,10) ));
INSTANTIATE_TEST_CASE_P(axpyBzpcx_double, BlasTest, ::testing::Values( make_int2(2,11) ));
INSTANTIATE_TEST_CASE_P(axpyZpbx_double, BlasTest, ::testing::Values( make_int2(2,12) ));
INSTANTIATE_TEST_CASE_P(caxpbypzYmbw_double, BlasTest, ::testing::Values( make_int2(2,13) ));
INSTANTIATE_TEST_CASE_P(cabxpyAx_double, BlasTest, ::testing::Values( make_int2(2,14) ));
INSTANTIATE_TEST_CASE_P(caxpbypz_double, BlasTest, ::testing::Values( make_int2(2,15) ));
INSTANTIATE_TEST_CASE_P(caxpbypczpw_double, BlasTest, ::testing::Values( make_int2(2,16) ));
INSTANTIATE_TEST_CASE_P(caxpyXmaz_double, BlasTest, ::testing::Values( make_int2(2,17) ));
INSTANTIATE_TEST_CASE_P(norm2_double, BlasTest, ::testing::Values( make_int2(2,18) ));
INSTANTIATE_TEST_CASE_P(reDotProduct_double, BlasTest, ::testing::Values( make_int2(2,19) ));
INSTANTIATE_TEST_CASE_P(axpyNorm_double, BlasTest, ::testing::Values( make_int2(2,20) ));
INSTANTIATE_TEST_CASE_P(xmyNorm_double, BlasTest, ::testing::Values( make_int2(2,21) ));
INSTANTIATE_TEST_CASE_P(caxpyNorm_double, BlasTest, ::testing::Values( make_int2(2,22) ));
INSTANTIATE_TEST_CASE_P(caxpyXmazNormX_double, BlasTest, ::testing::Values( make_int2(2,23) ));
INSTANTIATE_TEST_CASE_P(cabxpyAxNorm_double, BlasTest, ::testing::Values( make_int2(2,24) ));
INSTANTIATE_TEST_CASE_P(cDotProduct_double, BlasTest, ::testing::Values( make_int2(2,25) ));
INSTANTIATE_TEST_CASE_P(xpaycDotzy_double, BlasTest, ::testing::Values( make_int2(2,26) ));
INSTANTIATE_TEST_CASE_P(caxpyDotzy_double, BlasTest, ::testing::Values( make_int2(2,27) ));
INSTANTIATE_TEST_CASE_P(cDotProductNormA_double, BlasTest, ::testing::Values( make_int2(2,28) ));
INSTANTIATE_TEST_CASE_P(cDotProductNormB_double, BlasTest, ::testing::Values( make_int2(2,29) ));
INSTANTIATE_TEST_CASE_P(caxpbypzYmbwcDotProductWYNormY_double, BlasTest, ::testing::Values( make_int2(2,30) ));
INSTANTIATE_TEST_CASE_P(HeavyQuarkResidualNorm_double, BlasTest, ::testing::Values( make_int2(2,31) ));
INSTANTIATE_TEST_CASE_P(xpyHeavyQuarkResidualNorm_double, BlasTest, ::testing::Values( make_int2(2,32) ));
INSTANTIATE_TEST_CASE_P(TripleCGReduction_double, BlasTest, ::testing::Values( make_int2(2,33) ));
INSTANTIATE_TEST_CASE_P(TripleCGUpdate_double, BlasTest, ::testing::Values( make_int2(2,34) ));
INSTANTIATE_TEST_CASE_P(multicaxpy_double, BlasTest, ::testing::Values( make_int2(2,35) ));
INSTANTIATE_TEST_CASE_P(multiaxpyBzpcx_double, BlasTest, ::testing::Values( make_int2(2,36) ));
|
02c30365059a3d1ad4de1f3aaec171f5765989f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
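// Per-argument x/y extents are kept in device __constant__ memory; the matching
// *_h variables are host-side shadow copies used to detect when the extents
// change so the symbols are only re-uploaded when needed.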
__constant__ int xdim0_advec_mom_kernel_x2;
int xdim0_advec_mom_kernel_x2_h = -1;
__constant__ int ydim0_advec_mom_kernel_x2;
int ydim0_advec_mom_kernel_x2_h = -1;
__constant__ int xdim1_advec_mom_kernel_x2;
int xdim1_advec_mom_kernel_x2_h = -1;
__constant__ int ydim1_advec_mom_kernel_x2;
int ydim1_advec_mom_kernel_x2_h = -1;
__constant__ int xdim2_advec_mom_kernel_x2;
int xdim2_advec_mom_kernel_x2_h = -1;
__constant__ int ydim2_advec_mom_kernel_x2;
int ydim2_advec_mom_kernel_x2_h = -1;
__constant__ int xdim3_advec_mom_kernel_x2;
int xdim3_advec_mom_kernel_x2_h = -1;
__constant__ int ydim3_advec_mom_kernel_x2;
int ydim3_advec_mom_kernel_x2_h = -1;
__constant__ int xdim4_advec_mom_kernel_x2;
int xdim4_advec_mom_kernel_x2_h = -1;
__constant__ int ydim4_advec_mom_kernel_x2;
int ydim4_advec_mom_kernel_x2_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
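// The OPS_ACC macros flatten a relative (x, y, z) stencil offset into a linear
// index, with x fastest-varying, using the per-argument pitches stored above.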
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_mom_kernel_x2 * (y) + \
xdim0_advec_mom_kernel_x2 * ydim0_advec_mom_kernel_x2 * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_mom_kernel_x2 * (y) + \
xdim1_advec_mom_kernel_x2 * ydim1_advec_mom_kernel_x2 * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_advec_mom_kernel_x2 * (y) + \
xdim2_advec_mom_kernel_x2 * ydim2_advec_mom_kernel_x2 * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_advec_mom_kernel_x2 * (y) + \
xdim3_advec_mom_kernel_x2 * ydim3_advec_mom_kernel_x2 * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_advec_mom_kernel_x2 * (y) + \
xdim4_advec_mom_kernel_x2 * ydim4_advec_mom_kernel_x2 * (z))
// user function
__device__
inline void
advec_mom_kernel_x2_gpu(double *pre_vol, double *post_vol,
const double *volume, const double *vol_flux_y,
const double *vol_flux_z) {
post_vol[OPS_ACC1(0, 0, 0)] = volume[OPS_ACC2(0, 0, 0)] +
vol_flux_z[OPS_ACC4(0, 0, 1)] -
vol_flux_z[OPS_ACC4(0, 0, 0)];
pre_vol[OPS_ACC0(0, 0, 0)] = post_vol[OPS_ACC1(0, 0, 0)] +
vol_flux_y[OPS_ACC3(0, 1, 0)] -
vol_flux_y[OPS_ACC3(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
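// Thin wrapper kernel: each thread handles one grid point, offsetting every
// argument pointer by its (x, y, z) index before calling the user function.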
__global__ void ops_advec_mom_kernel_x2(double *__restrict arg0,
double *__restrict arg1,
const double *__restrict arg2,
const double *__restrict arg3,
const double *__restrict arg4,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel_x2 +
idx_z * 1 * 1 * xdim0_advec_mom_kernel_x2 * ydim0_advec_mom_kernel_x2;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel_x2 +
idx_z * 1 * 1 * xdim1_advec_mom_kernel_x2 * ydim1_advec_mom_kernel_x2;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel_x2 +
idx_z * 1 * 1 * xdim2_advec_mom_kernel_x2 * ydim2_advec_mom_kernel_x2;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_mom_kernel_x2 +
idx_z * 1 * 1 * xdim3_advec_mom_kernel_x2 * ydim3_advec_mom_kernel_x2;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_mom_kernel_x2 +
idx_z * 1 * 1 * xdim4_advec_mom_kernel_x2 * ydim4_advec_mom_kernel_x2;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_mom_kernel_x2_gpu(arg0, arg1, arg2, arg3, arg4);
}
}
// host stub function
void ops_par_loop_advec_mom_kernel_x2(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4) {
// Timing
double t1, t2, c1, c2;
ops_arg args[5] = {arg0, arg1, arg2, arg3, arg4};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 5, range, 21))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(21, "advec_mom_kernel_x2");
OPS_kernels[21].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
if (xdim0 != xdim0_advec_mom_kernel_x2_h ||
ydim0 != ydim0_advec_mom_kernel_x2_h ||
xdim1 != xdim1_advec_mom_kernel_x2_h ||
ydim1 != ydim1_advec_mom_kernel_x2_h ||
xdim2 != xdim2_advec_mom_kernel_x2_h ||
ydim2 != ydim2_advec_mom_kernel_x2_h ||
xdim3 != xdim3_advec_mom_kernel_x2_h ||
ydim3 != ydim3_advec_mom_kernel_x2_h ||
xdim4 != xdim4_advec_mom_kernel_x2_h ||
ydim4 != ydim4_advec_mom_kernel_x2_h) {
hipMemcpyToSymbol(xdim0_advec_mom_kernel_x2, &xdim0, sizeof(int));
xdim0_advec_mom_kernel_x2_h = xdim0;
hipMemcpyToSymbol(ydim0_advec_mom_kernel_x2, &ydim0, sizeof(int));
ydim0_advec_mom_kernel_x2_h = ydim0;
hipMemcpyToSymbol(xdim1_advec_mom_kernel_x2, &xdim1, sizeof(int));
xdim1_advec_mom_kernel_x2_h = xdim1;
hipMemcpyToSymbol(ydim1_advec_mom_kernel_x2, &ydim1, sizeof(int));
ydim1_advec_mom_kernel_x2_h = ydim1;
hipMemcpyToSymbol(xdim2_advec_mom_kernel_x2, &xdim2, sizeof(int));
xdim2_advec_mom_kernel_x2_h = xdim2;
hipMemcpyToSymbol(ydim2_advec_mom_kernel_x2, &ydim2, sizeof(int));
ydim2_advec_mom_kernel_x2_h = ydim2;
hipMemcpyToSymbol(xdim3_advec_mom_kernel_x2, &xdim3, sizeof(int));
xdim3_advec_mom_kernel_x2_h = xdim3;
hipMemcpyToSymbol(ydim3_advec_mom_kernel_x2, &ydim3, sizeof(int));
ydim3_advec_mom_kernel_x2_h = ydim3;
hipMemcpyToSymbol(xdim4_advec_mom_kernel_x2, &xdim4, sizeof(int));
xdim4_advec_mom_kernel_x2_h = xdim4;
hipMemcpyToSymbol(ydim4_advec_mom_kernel_x2, &ydim4, sizeof(int));
ydim4_advec_mom_kernel_x2_h = ydim4;
}
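  // Launch configuration: 2D thread blocks tile the x/y extent, and each z
  // plane maps to its own block layer (blockDim.z is 1).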
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
char *p_a[5];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
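  // Byte offset of the first element touched for arg0: the requested start
  // index adjusted by the dat's base and halo displacement d_m, accumulated
  // over all three dimensions (repeated below for args 1-4).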
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] -
args[4].dat->base[1] - d_m[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] -
d_m[2]);
p_a[4] = (char *)args[4].data_d + base4;
ops_H_D_exchanges_device(args, 5);
ops_halo_exchanges(args, 5, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[21].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_advec_mom_kernel_x2), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[21].time += t1 - t2;
}
ops_set_dirtybit_device(args, 5);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[21].mpi_time += t2 - t1;
OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg4);
}
}
| 02c30365059a3d1ad4de1f3aaec171f5765989f0.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_advec_mom_kernel_x2;
int xdim0_advec_mom_kernel_x2_h = -1;
__constant__ int ydim0_advec_mom_kernel_x2;
int ydim0_advec_mom_kernel_x2_h = -1;
__constant__ int xdim1_advec_mom_kernel_x2;
int xdim1_advec_mom_kernel_x2_h = -1;
__constant__ int ydim1_advec_mom_kernel_x2;
int ydim1_advec_mom_kernel_x2_h = -1;
__constant__ int xdim2_advec_mom_kernel_x2;
int xdim2_advec_mom_kernel_x2_h = -1;
__constant__ int ydim2_advec_mom_kernel_x2;
int ydim2_advec_mom_kernel_x2_h = -1;
__constant__ int xdim3_advec_mom_kernel_x2;
int xdim3_advec_mom_kernel_x2_h = -1;
__constant__ int ydim3_advec_mom_kernel_x2;
int ydim3_advec_mom_kernel_x2_h = -1;
__constant__ int xdim4_advec_mom_kernel_x2;
int xdim4_advec_mom_kernel_x2_h = -1;
__constant__ int ydim4_advec_mom_kernel_x2;
int ydim4_advec_mom_kernel_x2_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_mom_kernel_x2 * (y) + \
xdim0_advec_mom_kernel_x2 * ydim0_advec_mom_kernel_x2 * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_mom_kernel_x2 * (y) + \
xdim1_advec_mom_kernel_x2 * ydim1_advec_mom_kernel_x2 * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_advec_mom_kernel_x2 * (y) + \
xdim2_advec_mom_kernel_x2 * ydim2_advec_mom_kernel_x2 * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_advec_mom_kernel_x2 * (y) + \
xdim3_advec_mom_kernel_x2 * ydim3_advec_mom_kernel_x2 * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_advec_mom_kernel_x2 * (y) + \
xdim4_advec_mom_kernel_x2 * ydim4_advec_mom_kernel_x2 * (z))
// user function
__device__
inline void
advec_mom_kernel_x2_gpu(double *pre_vol, double *post_vol,
const double *volume, const double *vol_flux_y,
const double *vol_flux_z) {
post_vol[OPS_ACC1(0, 0, 0)] = volume[OPS_ACC2(0, 0, 0)] +
vol_flux_z[OPS_ACC4(0, 0, 1)] -
vol_flux_z[OPS_ACC4(0, 0, 0)];
pre_vol[OPS_ACC0(0, 0, 0)] = post_vol[OPS_ACC1(0, 0, 0)] +
vol_flux_y[OPS_ACC3(0, 1, 0)] -
vol_flux_y[OPS_ACC3(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
__global__ void ops_advec_mom_kernel_x2(double *__restrict arg0,
double *__restrict arg1,
const double *__restrict arg2,
const double *__restrict arg3,
const double *__restrict arg4,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel_x2 +
idx_z * 1 * 1 * xdim0_advec_mom_kernel_x2 * ydim0_advec_mom_kernel_x2;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel_x2 +
idx_z * 1 * 1 * xdim1_advec_mom_kernel_x2 * ydim1_advec_mom_kernel_x2;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel_x2 +
idx_z * 1 * 1 * xdim2_advec_mom_kernel_x2 * ydim2_advec_mom_kernel_x2;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_mom_kernel_x2 +
idx_z * 1 * 1 * xdim3_advec_mom_kernel_x2 * ydim3_advec_mom_kernel_x2;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_mom_kernel_x2 +
idx_z * 1 * 1 * xdim4_advec_mom_kernel_x2 * ydim4_advec_mom_kernel_x2;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_mom_kernel_x2_gpu(arg0, arg1, arg2, arg3, arg4);
}
}
// host stub function
void ops_par_loop_advec_mom_kernel_x2(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4) {
// Timing
double t1, t2, c1, c2;
ops_arg args[5] = {arg0, arg1, arg2, arg3, arg4};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 5, range, 21))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(21, "advec_mom_kernel_x2");
OPS_kernels[21].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
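  // Upload the per-argument extents to constant memory only when they differ
  // from the cached host-side values, avoiding a copy on every invocation.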
if (xdim0 != xdim0_advec_mom_kernel_x2_h ||
ydim0 != ydim0_advec_mom_kernel_x2_h ||
xdim1 != xdim1_advec_mom_kernel_x2_h ||
ydim1 != ydim1_advec_mom_kernel_x2_h ||
xdim2 != xdim2_advec_mom_kernel_x2_h ||
ydim2 != ydim2_advec_mom_kernel_x2_h ||
xdim3 != xdim3_advec_mom_kernel_x2_h ||
ydim3 != ydim3_advec_mom_kernel_x2_h ||
xdim4 != xdim4_advec_mom_kernel_x2_h ||
ydim4 != ydim4_advec_mom_kernel_x2_h) {
cudaMemcpyToSymbol(xdim0_advec_mom_kernel_x2, &xdim0, sizeof(int));
xdim0_advec_mom_kernel_x2_h = xdim0;
cudaMemcpyToSymbol(ydim0_advec_mom_kernel_x2, &ydim0, sizeof(int));
ydim0_advec_mom_kernel_x2_h = ydim0;
cudaMemcpyToSymbol(xdim1_advec_mom_kernel_x2, &xdim1, sizeof(int));
xdim1_advec_mom_kernel_x2_h = xdim1;
cudaMemcpyToSymbol(ydim1_advec_mom_kernel_x2, &ydim1, sizeof(int));
ydim1_advec_mom_kernel_x2_h = ydim1;
cudaMemcpyToSymbol(xdim2_advec_mom_kernel_x2, &xdim2, sizeof(int));
xdim2_advec_mom_kernel_x2_h = xdim2;
cudaMemcpyToSymbol(ydim2_advec_mom_kernel_x2, &ydim2, sizeof(int));
ydim2_advec_mom_kernel_x2_h = ydim2;
cudaMemcpyToSymbol(xdim3_advec_mom_kernel_x2, &xdim3, sizeof(int));
xdim3_advec_mom_kernel_x2_h = xdim3;
cudaMemcpyToSymbol(ydim3_advec_mom_kernel_x2, &ydim3, sizeof(int));
ydim3_advec_mom_kernel_x2_h = ydim3;
cudaMemcpyToSymbol(xdim4_advec_mom_kernel_x2, &xdim4, sizeof(int));
xdim4_advec_mom_kernel_x2_h = xdim4;
cudaMemcpyToSymbol(ydim4_advec_mom_kernel_x2, &ydim4, sizeof(int));
ydim4_advec_mom_kernel_x2_h = ydim4;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
char *p_a[5];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] -
args[4].dat->base[1] - d_m[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] -
d_m[2]);
p_a[4] = (char *)args[4].data_d + base4;
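  // Make sure device copies of the dats are current and halo regions have been
  // exchanged before launching the kernel.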
ops_H_D_exchanges_device(args, 5);
ops_halo_exchanges(args, 5, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[21].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_advec_mom_kernel_x2<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[21].time += t1 - t2;
}
ops_set_dirtybit_device(args, 5);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[21].mpi_time += t2 - t1;
OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg4);
}
}
|
4b2311af5d166db0dd61b633e0b811d4fdfe67a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_fact_kernel [2][1];
static int dims_fact_kernel_h [2][1] = {0};
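// dims_fact_kernel caches each argument's x extent in constant memory, with a
// host-side shadow used to skip redundant uploads; dt and dx referenced in the
// user function are assumed to be OPS global constants defined elsewhere.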
//user function
__device__
void fact_kernel_gpu(const ACC<double>& eff,
ACC<double>& s) {
double fact;
for (int m=0; m < 3 ;m++) {
fact = 0.50 * dt / dx ;
s(m,0) = -fact * (eff(m,0) - eff(m,-1));
}
}
__global__ void ops_fact_kernel(
double* __restrict arg0,
double* __restrict arg1,
int size0 ){
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*3;
arg1 += idx_x * 1*3;
if (idx_x < size0) {
const ACC<double> argp0(3, dims_fact_kernel[0][0], arg0);
ACC<double> argp1(3, dims_fact_kernel[1][0], arg1);
fact_kernel_gpu(argp0, argp1);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_fact_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
#else
void ops_par_loop_fact_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[2] = { arg0, arg1};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,2,range,12)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(12,"fact_kernel");
OPS_kernels[12].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[1];
int end[1];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[1];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 2,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<1; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
if (xdim0 != dims_fact_kernel_h[0][0] || xdim1 != dims_fact_kernel_h[1][0]) {
dims_fact_kernel_h[0][0] = xdim0;
dims_fact_kernel_h[1][0] = xdim1;
cutilSafeCall(hipMemcpyToSymbol( dims_fact_kernel, dims_fact_kernel_h, sizeof(dims_fact_kernel)));
}
int x_size = MAX(0,end[0]-start[0]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, 1, 1);
dim3 tblock(OPS_block_size_x,1,1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[2];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args,2,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[12].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0)
hipLaunchKernelGGL(( ops_fact_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],x_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[12].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[12].mpi_time += t2-t1;
OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_fact_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 12;
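  // djb2-style hash (seed 5381) over the loop index, iteration range and dat
  // indices, presumably used to identify this loop instance when it is
  // enqueued for lazy execution.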
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 12;
for ( int i=0; i<2; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 2;
desc->args = (ops_arg*)malloc(2*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->function = ops_par_loop_fact_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(12,"fact_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
| 4b2311af5d166db0dd61b633e0b811d4fdfe67a7.cu | //
// auto-generated by ops.py
//
__constant__ int dims_fact_kernel [2][1];
static int dims_fact_kernel_h [2][1] = {0};
//user function
__device__
void fact_kernel_gpu(const ACC<double>& eff,
ACC<double>& s) {
double fact;
for (int m=0; m < 3 ;m++) {
fact = 0.50 * dt / dx ;
s(m,0) = -fact * (eff(m,0) - eff(m,-1));
}
}
__global__ void ops_fact_kernel(
double* __restrict arg0,
double* __restrict arg1,
int size0 ){
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*3;
arg1 += idx_x * 1*3;
if (idx_x < size0) {
const ACC<double> argp0(3, dims_fact_kernel[0][0], arg0);
ACC<double> argp1(3, dims_fact_kernel[1][0], arg1);
fact_kernel_gpu(argp0, argp1);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_fact_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
#else
void ops_par_loop_fact_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[2] = { arg0, arg1};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,2,range,12)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(12,"fact_kernel");
OPS_kernels[12].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[1];
int end[1];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[1];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 2,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<1; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
if (xdim0 != dims_fact_kernel_h[0][0] || xdim1 != dims_fact_kernel_h[1][0]) {
dims_fact_kernel_h[0][0] = xdim0;
dims_fact_kernel_h[1][0] = xdim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_fact_kernel, dims_fact_kernel_h, sizeof(dims_fact_kernel)));
}
int x_size = MAX(0,end[0]-start[0]);
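  // 1D launch: thread blocks tile the x extent only, one thread per grid point.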
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, 1, 1);
dim3 tblock(OPS_block_size_x,1,1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[2];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args,2,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[12].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0)
ops_fact_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],x_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[12].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[12].mpi_time += t2-t1;
OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_fact_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 12;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 12;
for ( int i=0; i<2; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 2;
desc->args = (ops_arg*)malloc(2*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->function = ops_par_loop_fact_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(12,"fact_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
a3cae593ec4c57e49d389221b05283ad01679091.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file hello_app.cu
*
* @brief Simple Gunrock Application
*/
#include <gunrock/gunrock.h>
#include <gunrock/util/test_utils.cuh>
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
#define S1 (10)
#define S2 (5)
// <TODO> change includes
#include <gunrock/app/graphsage-k-hop/hello_enactor.cuh>
#include <gunrock/app/graphsage-k-hop/hello_test.cuh>
// </TODO>
namespace gunrock {
namespace app {
// <TODO> change namespace
namespace hello {
// </TODO>
hipError_t UseParameters(util::Parameters ¶meters) {
hipError_t retval = hipSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
// <TODO> add app specific parameters, eg:
// GUARD_CU(parameters.Use<std::string>(
// "src",
// util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
// "0",
// "<Vertex-ID|random|largestdegree> The source vertices\n"
// "\tIf random, randomly select non-zero degree vertices;\n"
// "\tIf largestdegree, select vertices with largest degrees",
// __FILE__, __LINE__));
// </TODO>
return retval;
}
/**
* @brief Run hello tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Execution parameters
* @param[in] graph Input graph
...
* @param[in] target where to perform the app
* \return hipError_t error message(s), if any
*/
template <typename GraphT>
hipError_t RunTests(util::Parameters ¶meters, GraphT &graph,
// <TODO> add problem specific reference results, e.g.:
typename GraphT::VertexT *ref_neighbors,
// </TODO>
util::Location target) {
hipError_t retval = hipSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef typename GraphT::CsrT CsrT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
std::cout << __LINE__ << ": " << graph.nodes << std::endl;
util::Info info("hello", parameters, graph);
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
// <TODO> get problem specific inputs, e.g.:
// std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
// printf("RunTests: %d srcs: src[0]=%d\n", srcs.size(), srcs[0]);
// </TODO>
// <TODO> allocate problem specific host data, e.g.:
VertexT *h_neighbors = new VertexT[graph.edges];
SizeT *h_positions = new SizeT[graph.nodes];
SizeT *h_lengths = new SizeT[graph.nodes];
SizeT *h_total_lengths = new SizeT[1];
// </TODO>
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(problem.InitNeighborsForHop (graph.nodes*S1, 0, target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
graph.Display ();
for (int run_num = 0; run_num < num_runs; ++run_num) {
GUARD_CU(problem.Reset(
// <TODO> problem specific data if necessary, eg:
// src,
// </TODO>
target));
GUARD_CU(enactor.Reset(
// <TODO> problem specific data if necessary:
// srcs[run_num % srcs.size()],
// </TODO>
target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
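    // One Enact call per hop; N_HOPS is assumed to be defined in the included
    // graphsage-k-hop headers rather than in this file.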
for (int __hop = 0; __hop < N_HOPS; __hop++) {
GUARD_CU(enactor.Enact(
__hop
));
}
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (false && validation == "each") {
// GUARD_CU(problem.Extract(
// // <TODO> problem specific data
// h_neighbors,
// h_positions,
// h_lengths,
// h_total_lengths
// // </TODO>
// ));
// // SizeT num_errors = Validate_Results(parameters, graph,
// // <TODO> problem specific data
// h_neighbors, ref_neighbors,
// // </TODO>
// false);
}
}
cpu_timer.Start();
//for (int hop = 0; hop < N_HOPS; hop++)
int hop = 1;
{
printf ("for hop %d\n", hop);
SizeT* h_lengths = new SizeT[graph.nodes];
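    // NOTE: this h_lengths shadows the buffer allocated earlier and is never
    // filled here while the Extract call below stays commented out, so the
    // sum computed from it reads uninitialized values.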
GUARD_CU(problem.Extract_total_lengths(
// <TODO> problem specific data
hop,
h_total_lengths
// </TODO>
));
//h_neighbors = new VertexT[h_total_lengths];
// GUARD_CU(problem.Extract(
// // <TODO> problem specific data
// hop,
// h_neighbors,
// h_total_lengths,
// h_positions,
// h_lengths,
// &h_total_lengths
// // </TODO>
// ));
int sum_e = 0;
for (SizeT v = 0; v < graph.nodes; v++) {
//printf ("v %d pos %d l %d\n", v, h_positions[v], h_lengths[v]);
sum_e += h_lengths[v];
}
std::cout << "Sum of lengths " << sum_e << std::endl;
// for (SizeT v = 0; v < graph.nodes - 1; v++) {
// //printf ("v %d l1 %d pos %d\n", v, h_lengths[v], h_positions[v]); //graph.CsrT::GetNeighborListOffset (v+1)-graph.CsrT::GetNeighborListOffset (v),);
// //assert (h_lengths[v] == graph.CsrT::GetNeighborListOffset (v+1)-graph.CsrT::GetNeighborListOffset (v));
// for (int e = 0; e < h_lengths[v]; e++) {
// int d_pos = h_positions[v] + e;
// int cpu_pos = graph.CsrT::GetNeighborListOffset (v) + e;
// // printf (" %d %d\n", h_neighbors[d_pos], graph.CsrT::GetEdgeDest (cpu_pos));
// }
// }
}
std::cout << "Time to exclude " << enactor.exclude_time << std::endl;
if (false && validation == "last") {
// SizeT num_errors = Validate_Results(parameters, graph,
// // <TODO> problem specific data
// h_neighbors, ref_neighbors,
// // </TODO>
// false);
}
// compute running statistics
// <TODO> change NULL to problem specific per-vertex visited marker, e.g.
// h_distances
info.ComputeTraversalStats(enactor, (VertexT *)NULL);
// Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(&enactor);
#endif
// </TODO>
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
  // <TODO> Release problem specific data, e.g.:
  delete[] h_neighbors;
  h_neighbors = NULL;
  // also free the auxiliary host buffers allocated above
  delete[] h_positions;
  delete[] h_lengths;
  delete[] h_total_lengths;
  // </TODO>
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace hello
} // namespace app
} // namespace gunrock
// ===========================================================================================
// ========================= CODE BELOW THIS LINE NOT NEEDED FOR TESTS
// =======================
// ===========================================================================================
// /*
// * @brief Entry of gunrock_template function
// * @tparam GraphT Type of the graph
// * @tparam ValueT Type of the distances
// * @param[in] parameters Execution parameters
// * @param[in] graph Input graph
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
// */
// template <typename GraphT, typename ValueT = typename GraphT::ValueT>
// double gunrock_Template(
// gunrock::util::Parameters ¶meters,
// GraphT &graph
// // TODO: add problem specific outputs, e.g.:
// //ValueT **distances
// )
// {
// typedef typename GraphT::VertexT VertexT;
// typedef gunrock::app::Template::Problem<GraphT > ProblemT;
// typedef gunrock::app::Template::Enactor<ProblemT> EnactorT;
// gunrock::util::CpuTimer cpu_timer;
// gunrock::util::Location target = gunrock::util::DEVICE;
// double total_time = 0;
// if (parameters.UseDefault("quiet"))
// parameters.Set("quiet", true);
// // Allocate problem and enactor on GPU, and initialize them
// ProblemT problem(parameters);
// EnactorT enactor;
// problem.Init(graph , target);
// enactor.Init(problem, target);
// int num_runs = parameters.Get<int>("num-runs");
// // TODO: get problem specific inputs, e.g.:
// // std::vector<VertexT> srcs =
// parameters.Get<std::vector<VertexT>>("srcs");
// // int num_srcs = srcs.size();
// for (int run_num = 0; run_num < num_runs; ++run_num)
// {
// // TODO: problem specific inputs, e.g.:
// // int src_num = run_num % num_srcs;
// // VertexT src = srcs[src_num];
// problem.Reset(/*src,*/ target);
// enactor.Reset(/*src,*/ target);
// cpu_timer.Start();
// enactor.Enact(/*src*/);
// cpu_timer.Stop();
// total_time += cpu_timer.ElapsedMillis();
// // TODO: extract problem specific data, e.g.:
// problem.Extract(/*distances[src_num]*/);
// }
// enactor.Release(target);
// problem.Release(target);
// // TODO: problem specific clean ups, e.g.:
// // srcs.clear();
// return total_time;
// }
// * @brief Simple interface taking in a graph in CSR format
// * @param[in] num_nodes Number of vertices in the input graph
// * @param[in] num_edges Number of edges in the input graph
// * @param[in] row_offsets CSR-formatted graph input row offsets
// * @param[in] col_indices CSR-formatted graph input column indices
// * @param[in] edge_values CSR-formatted graph input edge weights
// * @param[in] num_runs Number of runs to perform SSSP
// * @param[in] sources Sources to begin traversal, one for each run
// * @param[in] mark_preds Whether to output predecessor info
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
// template <
// typename VertexT = int,
// typename SizeT = int,
// typename GValueT = unsigned int,
// typename TValueT = GValueT>
// float Template(
// const SizeT num_nodes,
// const SizeT num_edges,
// const SizeT *row_offsets,
// const VertexT *col_indices,
// const GValueT *edge_values,
// const int num_runs
// // TODO: add problem specific inputs and outputs, e.g.:
// // VertexT *sources,
// // SSSPValueT **distances
// )
// {
// // TODO: change to other graph representation, if not using CSR
// typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
// gunrock::graph::HAS_EDGE_VALUES | gunrock::graph::HAS_CSR>
// GraphT;
// typedef typename GraphT::CsrT CsrT;
// // Setup parameters
// gunrock::util::Parameters parameters("Template");
// gunrock::graphio::UseParameters(parameters);
// gunrock::app::Template::UseParameters(parameters);
// gunrock::app::UseParameters_test(parameters);
// parameters.Parse_CommandLine(0, NULL);
// parameters.Set("graph-type", "by-pass");
// parameters.Set("num-runs", num_runs);
// // TODO: problem specific inputs, e.g.:
// // std::vector<VertexT> srcs;
// // for (int i = 0; i < num_runs; i ++)
// // srcs.push_back(sources[i]);
// // parameters.Set("srcs", srcs);
// bool quiet = parameters.Get<bool>("quiet");
// GraphT graph;
// // Assign pointers into gunrock graph format
// // TODO: change to other graph representation, if not using CSR
// graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
// graph.CsrT::row_offsets .SetPointer(row_offsets, num_nodes + 1,
// gunrock::util::HOST); graph.CsrT::column_indices.SetPointer(col_indices,
// num_edges, gunrock::util::HOST); graph.CsrT::edge_values
// .SetPointer(edge_values, num_edges, gunrock::util::HOST);
// graph.FromCsr(graph.csr(), true, quiet);
// gunrock::graphio::LoadGraph(parameters, graph);
// // Run the Template
// // TODO: add problem specific outputs, e.g.
// double elapsed_time = gunrock_Template(parameters, graph /*,
// distances*/);
// // Cleanup
// graph.Release();
// // TODO: problem specific cleanup
// // srcs.clear();
// return elapsed_time;
// }
// // Leave this at the end of the file
// // Local Variables:
// // mode:c++
// // c-file-style: "NVIDIA"
// // End:
| a3cae593ec4c57e49d389221b05283ad01679091.cu | // ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file hello_app.cu
*
* @brief Simple Gunrock Application
*/
#include <gunrock/gunrock.h>
#include <gunrock/util/test_utils.cuh>
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
#define S1 (10)
#define S2 (5)
// <TODO> change includes
#include <gunrock/app/graphsage-k-hop/hello_enactor.cuh>
#include <gunrock/app/graphsage-k-hop/hello_test.cuh>
// </TODO>
namespace gunrock {
namespace app {
// <TODO> change namespace
namespace hello {
// </TODO>
cudaError_t UseParameters(util::Parameters ¶meters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
// <TODO> add app specific parameters, eg:
// GUARD_CU(parameters.Use<std::string>(
// "src",
// util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
// "0",
// "<Vertex-ID|random|largestdegree> The source vertices\n"
// "\tIf random, randomly select non-zero degree vertices;\n"
// "\tIf largestdegree, select vertices with largest degrees",
// __FILE__, __LINE__));
// </TODO>
return retval;
}
/**
* @brief Run hello tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Execution parameters
* @param[in] graph Input graph
...
* @param[in] target where to perform the app
* \return cudaError_t error message(s), if any
*/
template <typename GraphT>
cudaError_t RunTests(util::Parameters ¶meters, GraphT &graph,
// <TODO> add problem specific reference results, e.g.:
typename GraphT::VertexT *ref_neighbors,
// </TODO>
util::Location target) {
cudaError_t retval = cudaSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef typename GraphT::CsrT CsrT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
std::cout << __LINE__ << ": " << graph.nodes << std::endl;
util::Info info("hello", parameters, graph);
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
// <TODO> get problem specific inputs, e.g.:
// std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
// printf("RunTests: %d srcs: src[0]=%d\n", srcs.size(), srcs[0]);
// </TODO>
// <TODO> allocate problem specific host data, e.g.:
VertexT *h_neighbors = new VertexT[graph.edges];
SizeT *h_positions = new SizeT[graph.nodes];
SizeT *h_lengths = new SizeT[graph.nodes];
SizeT *h_total_lengths = new SizeT[1];
// </TODO>
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(problem.InitNeighborsForHop (graph.nodes*S1, 0, target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
graph.Display ();
for (int run_num = 0; run_num < num_runs; ++run_num) {
GUARD_CU(problem.Reset(
// <TODO> problem specific data if necessary, eg:
// src,
// </TODO>
target));
GUARD_CU(enactor.Reset(
// <TODO> problem specific data if necessary:
// srcs[run_num % srcs.size()],
// </TODO>
target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
for (int __hop = 0; __hop < N_HOPS; __hop++) {
GUARD_CU(enactor.Enact(
__hop
));
}
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (false && validation == "each") {
// GUARD_CU(problem.Extract(
// // <TODO> problem specific data
// h_neighbors,
// h_positions,
// h_lengths,
// h_total_lengths
// // </TODO>
// ));
// // SizeT num_errors = Validate_Results(parameters, graph,
// // <TODO> problem specific data
// h_neighbors, ref_neighbors,
// // </TODO>
// false);
}
}
cpu_timer.Start();
//for (int hop = 0; hop < N_HOPS; hop++)
int hop = 1;
{
printf ("for hop %d\n", hop);
SizeT* h_lengths = new SizeT[graph.nodes];
GUARD_CU(problem.Extract_total_lengths(
// <TODO> problem specific data
hop,
h_total_lengths
// </TODO>
));
//h_neighbors = new VertexT[h_total_lengths];
// GUARD_CU(problem.Extract(
// // <TODO> problem specific data
// hop,
// h_neighbors,
// h_total_lengths,
// h_positions,
// h_lengths,
// &h_total_lengths
// // </TODO>
// ));
int sum_e = 0;
for (SizeT v = 0; v < graph.nodes; v++) {
//printf ("v %d pos %d l %d\n", v, h_positions[v], h_lengths[v]);
sum_e += h_lengths[v];
}
std::cout << "Sum of lengths " << sum_e << std::endl;
// for (SizeT v = 0; v < graph.nodes - 1; v++) {
// //printf ("v %d l1 %d pos %d\n", v, h_lengths[v], h_positions[v]); //graph.CsrT::GetNeighborListOffset (v+1)-graph.CsrT::GetNeighborListOffset (v),);
// //assert (h_lengths[v] == graph.CsrT::GetNeighborListOffset (v+1)-graph.CsrT::GetNeighborListOffset (v));
// for (int e = 0; e < h_lengths[v]; e++) {
// int d_pos = h_positions[v] + e;
// int cpu_pos = graph.CsrT::GetNeighborListOffset (v) + e;
// // printf (" %d %d\n", h_neighbors[d_pos], graph.CsrT::GetEdgeDest (cpu_pos));
// }
// }
}
std::cout << "Time to exclude " << enactor.exclude_time << std::endl;
if (false && validation == "last") {
// SizeT num_errors = Validate_Results(parameters, graph,
// // <TODO> problem specific data
// h_neighbors, ref_neighbors,
// // </TODO>
// false);
}
// compute running statistics
// <TODO> change NULL to problem specific per-vertex visited marker, e.g.
// h_distances
info.ComputeTraversalStats(enactor, (VertexT *)NULL);
// Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(&enactor);
#endif
// </TODO>
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
  // <TODO> Release problem specific data, e.g.:
  delete[] h_neighbors;
  h_neighbors = NULL;
  // also free the auxiliary host buffers allocated above
  delete[] h_positions;
  delete[] h_lengths;
  delete[] h_total_lengths;
  // </TODO>
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace hello
} // namespace app
} // namespace gunrock
// ===========================================================================================
// ========================= CODE BELOW THIS LINE NOT NEEDED FOR TESTS
// =======================
// ===========================================================================================
// /*
// * @brief Entry of gunrock_template function
// * @tparam GraphT Type of the graph
// * @tparam ValueT Type of the distances
// * @param[in] parameters Execution parameters
// * @param[in] graph Input graph
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
// */
// template <typename GraphT, typename ValueT = typename GraphT::ValueT>
// double gunrock_Template(
// gunrock::util::Parameters ¶meters,
// GraphT &graph
// // TODO: add problem specific outputs, e.g.:
// //ValueT **distances
// )
// {
// typedef typename GraphT::VertexT VertexT;
// typedef gunrock::app::Template::Problem<GraphT > ProblemT;
// typedef gunrock::app::Template::Enactor<ProblemT> EnactorT;
// gunrock::util::CpuTimer cpu_timer;
// gunrock::util::Location target = gunrock::util::DEVICE;
// double total_time = 0;
// if (parameters.UseDefault("quiet"))
// parameters.Set("quiet", true);
// // Allocate problem and enactor on GPU, and initialize them
// ProblemT problem(parameters);
// EnactorT enactor;
// problem.Init(graph , target);
// enactor.Init(problem, target);
// int num_runs = parameters.Get<int>("num-runs");
// // TODO: get problem specific inputs, e.g.:
// // std::vector<VertexT> srcs =
// parameters.Get<std::vector<VertexT>>("srcs");
// // int num_srcs = srcs.size();
// for (int run_num = 0; run_num < num_runs; ++run_num)
// {
// // TODO: problem specific inputs, e.g.:
// // int src_num = run_num % num_srcs;
// // VertexT src = srcs[src_num];
// problem.Reset(/*src,*/ target);
// enactor.Reset(/*src,*/ target);
// cpu_timer.Start();
// enactor.Enact(/*src*/);
// cpu_timer.Stop();
// total_time += cpu_timer.ElapsedMillis();
// // TODO: extract problem specific data, e.g.:
// problem.Extract(/*distances[src_num]*/);
// }
// enactor.Release(target);
// problem.Release(target);
// // TODO: problem specific clean ups, e.g.:
// // srcs.clear();
// return total_time;
// }
// * @brief Simple interface take in graph as CSR format
// * @param[in] num_nodes Number of vertices in the input graph
// * @param[in] num_edges Number of edges in the input graph
// * @param[in] row_offsets CSR-formatted graph input row offsets
// * @param[in] col_indices CSR-formatted graph input column indices
// * @param[in] edge_values CSR-formatted graph input edge weights
// * @param[in] num_runs Number of runs to perform SSSP
// * @param[in] sources Sources to begin traverse, one for each run
// * @param[in] mark_preds Whether to output predecessor info
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
// template <
// typename VertexT = int,
// typename SizeT = int,
// typename GValueT = unsigned int,
// typename TValueT = GValueT>
// float Template(
// const SizeT num_nodes,
// const SizeT num_edges,
// const SizeT *row_offsets,
// const VertexT *col_indices,
// const GValueT *edge_values,
// const int num_runs
// // TODO: add problem specific inputs and outputs, e.g.:
// // VertexT *sources,
// // SSSPValueT **distances
// )
// {
// // TODO: change to other graph representation, if not using CSR
// typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
// gunrock::graph::HAS_EDGE_VALUES | gunrock::graph::HAS_CSR>
// GraphT;
// typedef typename GraphT::CsrT CsrT;
// // Setup parameters
// gunrock::util::Parameters parameters("Template");
// gunrock::graphio::UseParameters(parameters);
// gunrock::app::Template::UseParameters(parameters);
// gunrock::app::UseParameters_test(parameters);
// parameters.Parse_CommandLine(0, NULL);
// parameters.Set("graph-type", "by-pass");
// parameters.Set("num-runs", num_runs);
// // TODO: problem specific inputs, e.g.:
// // std::vector<VertexT> srcs;
// // for (int i = 0; i < num_runs; i ++)
// // srcs.push_back(sources[i]);
// // parameters.Set("srcs", srcs);
// bool quiet = parameters.Get<bool>("quiet");
// GraphT graph;
// // Assign pointers into gunrock graph format
// // TODO: change to other graph representation, if not using CSR
// graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
// graph.CsrT::row_offsets .SetPointer(row_offsets, num_nodes + 1,
// gunrock::util::HOST); graph.CsrT::column_indices.SetPointer(col_indices,
// num_edges, gunrock::util::HOST); graph.CsrT::edge_values
// .SetPointer(edge_values, num_edges, gunrock::util::HOST);
// graph.FromCsr(graph.csr(), true, quiet);
// gunrock::graphio::LoadGraph(parameters, graph);
// // Run the Template
// // TODO: add problem specific outputs, e.g.
// double elapsed_time = gunrock_Template(parameters, graph /*,
// distances*/);
// // Cleanup
// graph.Release();
// // TODO: problem specific cleanup
// // srcs.clear();
// return elapsed_time;
// }
// // Leave this at the end of the file
// // Local Variables:
// // mode:c++
// // c-file-style: "NVIDIA"
// // End:
|
6e5d28cc0a18c6d463ec73ab739a8788b3cc3061.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "../timer.h"
#include "../utils.hpp"
#include <string>
#include <stdio.h>
void preProcess(float **d_luminance, unsigned int **d_cdf,
size_t *numRows, size_t *numCols, unsigned int *numBins,
const std::string& filename);
void postProcess(const std::string& output_file, size_t numRows, size_t numCols,
float min_logLum, float max_logLum);
void your_histogram_and_prefixsum(const float* const d_luminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins);
#include "HW3.hip"
#include "student_func.hip"
int main(int argc, char **argv) {
float *d_luminance;
unsigned int *d_cdf;
size_t numRows, numCols;
unsigned int numBins;
std::string input_file;
std::string output_file;
if (argc == 3) {
input_file = std::string(argv[1]);
output_file = std::string(argv[2]);
}
else {
std::cerr << "Usage: ./hw input_file output_file" << std::endl;
exit(1);
}
//load the image and give us our input and output pointers
preProcess(&d_luminance, &d_cdf,
&numRows, &numCols, &numBins, input_file);
GpuTimer timer;
float min_logLum, max_logLum;
min_logLum = 0.f;
max_logLum = 1.f;
timer.Start();
//call the students' code
your_histogram_and_prefixsum(d_luminance, d_cdf, min_logLum, max_logLum,
numRows, numCols, numBins);
timer.Stop();
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
int err = printf("%f msecs.\n", timer.Elapsed());
if (err < 0) {
//Couldn't print! Probably the student closed stdout - bad news
std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
exit(1);
}
//check results and output the tone-mapped image
postProcess(output_file, numRows, numCols, min_logLum, max_logLum);
return 0;
} | 6e5d28cc0a18c6d463ec73ab739a8788b3cc3061.cu | #include <iostream>
#include "../timer.h"
#include "../utils.hpp"
#include <string>
#include <stdio.h>
void preProcess(float **d_luminance, unsigned int **d_cdf,
size_t *numRows, size_t *numCols, unsigned int *numBins,
const std::string& filename);
void postProcess(const std::string& output_file, size_t numRows, size_t numCols,
float min_logLum, float max_logLum);
void your_histogram_and_prefixsum(const float* const d_luminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins);
#include "HW3.cu"
#include "student_func.cu"
int main(int argc, char **argv) {
float *d_luminance;
unsigned int *d_cdf;
size_t numRows, numCols;
unsigned int numBins;
std::string input_file;
std::string output_file;
if (argc == 3) {
input_file = std::string(argv[1]);
output_file = std::string(argv[2]);
}
else {
std::cerr << "Usage: ./hw input_file output_file" << std::endl;
exit(1);
}
//load the image and give us our input and output pointers
preProcess(&d_luminance, &d_cdf,
&numRows, &numCols, &numBins, input_file);
GpuTimer timer;
float min_logLum, max_logLum;
min_logLum = 0.f;
max_logLum = 1.f;
timer.Start();
//call the students' code
your_histogram_and_prefixsum(d_luminance, d_cdf, min_logLum, max_logLum,
numRows, numCols, numBins);
timer.Stop();
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
int err = printf("%f msecs.\n", timer.Elapsed());
if (err < 0) {
//Couldn't print! Probably the student closed stdout - bad news
std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
exit(1);
}
//check results and output the tone-mapped image
postProcess(output_file, numRows, numCols, min_logLum, max_logLum);
return 0;
} |
3a8a46f56f0e2103c11d2cbbc3263a5aa8cb9f30.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
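//As a quick sanity check of the formula above: a pure green pixel
//(R,G,B) = (0, 255, 0) gives I = .299f*0 + .587f*255 + .114f*0 = 149.685f,
//which truncates to 149 in the one-byte greyscale output, while a mid-grey
//pixel (128, 128, 128) maps back to 128 because the three weights sum to 1.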
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int c = blockIdx.x;
int r = threadIdx.x;
uchar4 rgba = rgbaImage[r * numCols + c];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[r * numCols + c] = channelSum;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(numRows, 1, 1); //TODO
const dim3 gridSize( numCols, 1, 1); //TODO
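  //Note: this launch maps one block per image column and one thread per row,
  //which only works while numRows <= 1024 (the per-block thread limit);
  //larger images would need a 2D tiling, e.g. 16x16 thread blocks over a
  //ceil(numCols/16) x ceil(numRows/16) grid, with the kernel indices updated to match.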
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 3a8a46f56f0e2103c11d2cbbc3263a5aa8cb9f30.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int c = blockIdx.x;
int r = threadIdx.x;
uchar4 rgba = rgbaImage[r * numCols + c];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[r * numCols + c] = channelSum;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(numRows, 1, 1); //TODO
const dim3 gridSize( numCols, 1, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
ca1738a6060a5b03bad978afeb7b6a21e7e9049e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <graph.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <community/louvain_kernels.hpp>
#include <utilities/graph_utils.cuh>
//#define TIMING
#ifdef TIMING
#include <utilities/high_res_timer.hpp>
#endif
#include <converters/COOtoCSR.cuh>
namespace cugraph {
namespace detail {
template <typename vertex_t, typename edge_t, typename weight_t>
weight_t update_clustering_by_delta_modularity_constrained(
weight_t total_edge_weight,
weight_t resolution,
GraphCSRView<vertex_t, edge_t, weight_t> const &graph,
rmm::device_vector<vertex_t> const &src_indices,
rmm::device_vector<weight_t> const &vertex_weights,
rmm::device_vector<weight_t> &cluster_weights,
rmm::device_vector<vertex_t> &cluster,
rmm::device_vector<vertex_t> &constraint,
hipStream_t stream)
{
rmm::device_vector<vertex_t> next_cluster(cluster);
rmm::device_vector<weight_t> delta_Q(graph.number_of_edges);
rmm::device_vector<vertex_t> cluster_hash(graph.number_of_edges);
rmm::device_vector<weight_t> old_cluster_sum(graph.number_of_vertices);
weight_t *d_delta_Q = delta_Q.data().get();
vertex_t *d_constraint = constraint.data().get();
vertex_t const *d_src_indices = src_indices.data().get();
vertex_t const *d_dst_indices = graph.indices;
weight_t new_Q = modularity(total_edge_weight, resolution, graph, cluster.data().get(), stream);
weight_t cur_Q = new_Q - 1;
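  // modularity() is expected to evaluate the usual resolution-scaled score
  //   Q = (1 / 2m) * sum_ij [ A_ij - resolution * k_i * k_j / (2m) ] * delta(c_i, c_j),
  // with m the total edge weight, k_i the weighted degree of vertex i and c_i
  // its cluster label; the loop below keeps accepting moves while Q improves.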
// To avoid the potential of having two vertices swap clusters
// we will only allow vertices to move up (true) or down (false)
// during each iteration of the loop
bool up_down = true;
while (new_Q > (cur_Q + 0.0001)) {
cur_Q = new_Q;
compute_delta_modularity(total_edge_weight,
resolution,
graph,
src_indices,
vertex_weights,
cluster_weights,
cluster,
cluster_hash,
delta_Q,
old_cluster_sum,
stream);
// Filter out positive delta_Q values for nodes not in the same constraint group
thrust::for_each(
rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(graph.number_of_edges),
[d_src_indices, d_dst_indices, d_constraint, d_delta_Q] __device__(vertex_t i) {
vertex_t start_cluster = d_constraint[d_src_indices[i]];
vertex_t end_cluster = d_constraint[d_dst_indices[i]];
if (start_cluster != end_cluster) d_delta_Q[i] = weight_t{0.0};
});
assign_nodes(graph,
delta_Q,
cluster_hash,
src_indices,
next_cluster,
vertex_weights,
cluster_weights,
up_down,
stream);
up_down = !up_down;
new_Q = modularity(total_edge_weight, resolution, graph, next_cluster.data().get(), stream);
if (new_Q > cur_Q) {
thrust::copy(rmm::exec_policy(stream)->on(stream),
next_cluster.begin(),
next_cluster.end(),
cluster.begin());
}
}
return cur_Q;
}
template float update_clustering_by_delta_modularity_constrained(
float,
float,
GraphCSRView<int32_t, int32_t, float> const &,
rmm::device_vector<int32_t> const &,
rmm::device_vector<float> const &,
rmm::device_vector<float> &,
rmm::device_vector<int32_t> &,
rmm::device_vector<int32_t> &,
hipStream_t);
template double update_clustering_by_delta_modularity_constrained(
double,
double,
GraphCSRView<int32_t, int32_t, double> const &,
rmm::device_vector<int32_t> const &,
rmm::device_vector<double> const &,
rmm::device_vector<double> &,
rmm::device_vector<int32_t> &,
rmm::device_vector<int32_t> &,
hipStream_t);
template <typename vertex_t, typename edge_t, typename weight_t>
void leiden(GraphCSRView<vertex_t, edge_t, weight_t> const &graph,
weight_t &final_modularity,
int &num_level,
vertex_t *cluster_vec,
int max_level,
weight_t resolution,
hipStream_t stream)
{
#ifdef TIMING
HighResTimer hr_timer;
#endif
num_level = 0;
//
// Vectors to create a copy of the graph
//
rmm::device_vector<edge_t> offsets_v(graph.offsets, graph.offsets + graph.number_of_vertices + 1);
rmm::device_vector<vertex_t> indices_v(graph.indices, graph.indices + graph.number_of_edges);
rmm::device_vector<weight_t> weights_v(graph.edge_data, graph.edge_data + graph.number_of_edges);
rmm::device_vector<vertex_t> src_indices_v(graph.number_of_edges);
//
// Weights and clustering across iterations of algorithm
//
rmm::device_vector<weight_t> vertex_weights_v(graph.number_of_vertices);
rmm::device_vector<weight_t> cluster_weights_v(graph.number_of_vertices);
rmm::device_vector<vertex_t> cluster_v(graph.number_of_vertices);
//
// Temporaries used within kernels. Each iteration uses less
// of this memory
//
rmm::device_vector<vertex_t> tmp_arr_v(graph.number_of_vertices);
rmm::device_vector<vertex_t> cluster_inverse_v(graph.number_of_vertices);
weight_t total_edge_weight =
thrust::reduce(rmm::exec_policy(stream)->on(stream), weights_v.begin(), weights_v.end());
weight_t best_modularity = -1;
//
// Initialize every cluster to reference each vertex to itself
//
thrust::sequence(rmm::exec_policy(stream)->on(stream), cluster_v.begin(), cluster_v.end());
thrust::copy(
rmm::exec_policy(stream)->on(stream), cluster_v.begin(), cluster_v.end(), cluster_vec);
//
// Our copy of the graph. Each iteration of the outer loop will
// shrink this copy of the graph.
//
GraphCSRView<vertex_t, edge_t, weight_t> current_graph(offsets_v.data().get(),
indices_v.data().get(),
weights_v.data().get(),
graph.number_of_vertices,
graph.number_of_edges);
current_graph.get_source_indices(src_indices_v.data().get());
while (num_level < max_level) {
//
// Sum the weights of all edges departing a vertex. This is
// loop invariant, so we'll compute it here.
//
// Cluster weights are equivalent to vertex weights with this initial
// graph
//
#ifdef TIMING
hr_timer.start("init");
#endif
cugraph::detail::compute_vertex_sums(current_graph, vertex_weights_v, stream);
thrust::copy(rmm::exec_policy(stream)->on(stream),
vertex_weights_v.begin(),
vertex_weights_v.end(),
cluster_weights_v.begin());
#ifdef TIMING
hr_timer.stop();
hr_timer.start("update_clustering");
#endif
weight_t new_Q = update_clustering_by_delta_modularity(total_edge_weight,
resolution,
current_graph,
src_indices_v,
vertex_weights_v,
cluster_weights_v,
cluster_v,
stream);
// After finding the initial unconstrained partition we use that partitioning as the constraint
// for the second round.
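    // This constrained refinement is what makes the pass Leiden-like rather than
    // plain Louvain: in the second round a vertex may only move between clusters
    // that carry the same constraint label, which is enforced by the delta_Q
    // filtering in update_clustering_by_delta_modularity_constrained above.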
rmm::device_vector<vertex_t> constraint(graph.number_of_vertices);
thrust::copy(
rmm::exec_policy(stream)->on(stream), cluster_v.begin(), cluster_v.end(), constraint.begin());
new_Q = update_clustering_by_delta_modularity_constrained(total_edge_weight,
resolution,
current_graph,
src_indices_v,
vertex_weights_v,
cluster_weights_v,
cluster_v,
constraint,
stream);
#ifdef TIMING
hr_timer.stop();
#endif
if (new_Q <= best_modularity) { break; }
best_modularity = new_Q;
#ifdef TIMING
hr_timer.start("shrinking graph");
#endif
// renumber the clusters to the range 0..(num_clusters-1)
vertex_t num_clusters = renumber_clusters(
graph.number_of_vertices, cluster_v, tmp_arr_v, cluster_inverse_v, cluster_vec, stream);
cluster_weights_v.resize(num_clusters);
// shrink our graph to represent the graph of supervertices
generate_superverticies_graph(current_graph, src_indices_v, num_clusters, cluster_v, stream);
// assign each new vertex to its own cluster
thrust::sequence(rmm::exec_policy(stream)->on(stream), cluster_v.begin(), cluster_v.end());
#ifdef TIMING
hr_timer.stop();
#endif
num_level++;
}
#ifdef TIMING
hr_timer.display(std::cout);
#endif
final_modularity = best_modularity;
}
template void leiden(GraphCSRView<int32_t, int32_t, float> const &,
float &,
int &,
int32_t *,
int,
float,
hipStream_t);
template void leiden(GraphCSRView<int32_t, int32_t, double> const &,
double &,
int &,
int32_t *,
int,
double,
hipStream_t);
} // namespace detail
} // namespace cugraph
| ca1738a6060a5b03bad978afeb7b6a21e7e9049e.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <graph.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <community/louvain_kernels.hpp>
#include <utilities/graph_utils.cuh>
//#define TIMING
#ifdef TIMING
#include <utilities/high_res_timer.hpp>
#endif
#include <converters/COOtoCSR.cuh>
namespace cugraph {
namespace detail {
template <typename vertex_t, typename edge_t, typename weight_t>
weight_t update_clustering_by_delta_modularity_constrained(
weight_t total_edge_weight,
weight_t resolution,
GraphCSRView<vertex_t, edge_t, weight_t> const &graph,
rmm::device_vector<vertex_t> const &src_indices,
rmm::device_vector<weight_t> const &vertex_weights,
rmm::device_vector<weight_t> &cluster_weights,
rmm::device_vector<vertex_t> &cluster,
rmm::device_vector<vertex_t> &constraint,
cudaStream_t stream)
{
rmm::device_vector<vertex_t> next_cluster(cluster);
rmm::device_vector<weight_t> delta_Q(graph.number_of_edges);
rmm::device_vector<vertex_t> cluster_hash(graph.number_of_edges);
rmm::device_vector<weight_t> old_cluster_sum(graph.number_of_vertices);
weight_t *d_delta_Q = delta_Q.data().get();
vertex_t *d_constraint = constraint.data().get();
vertex_t const *d_src_indices = src_indices.data().get();
vertex_t const *d_dst_indices = graph.indices;
weight_t new_Q = modularity(total_edge_weight, resolution, graph, cluster.data().get(), stream);
weight_t cur_Q = new_Q - 1;
// To avoid the potential of having two vertices swap clusters
// we will only allow vertices to move up (true) or down (false)
// during each iteration of the loop
bool up_down = true;
while (new_Q > (cur_Q + 0.0001)) {
cur_Q = new_Q;
compute_delta_modularity(total_edge_weight,
resolution,
graph,
src_indices,
vertex_weights,
cluster_weights,
cluster,
cluster_hash,
delta_Q,
old_cluster_sum,
stream);
// Filter out positive delta_Q values for nodes not in the same constraint group
thrust::for_each(
rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(graph.number_of_edges),
[d_src_indices, d_dst_indices, d_constraint, d_delta_Q] __device__(vertex_t i) {
vertex_t start_cluster = d_constraint[d_src_indices[i]];
vertex_t end_cluster = d_constraint[d_dst_indices[i]];
if (start_cluster != end_cluster) d_delta_Q[i] = weight_t{0.0};
});
assign_nodes(graph,
delta_Q,
cluster_hash,
src_indices,
next_cluster,
vertex_weights,
cluster_weights,
up_down,
stream);
up_down = !up_down;
new_Q = modularity(total_edge_weight, resolution, graph, next_cluster.data().get(), stream);
if (new_Q > cur_Q) {
thrust::copy(rmm::exec_policy(stream)->on(stream),
next_cluster.begin(),
next_cluster.end(),
cluster.begin());
}
}
return cur_Q;
}
template float update_clustering_by_delta_modularity_constrained(
float,
float,
GraphCSRView<int32_t, int32_t, float> const &,
rmm::device_vector<int32_t> const &,
rmm::device_vector<float> const &,
rmm::device_vector<float> &,
rmm::device_vector<int32_t> &,
rmm::device_vector<int32_t> &,
cudaStream_t);
template double update_clustering_by_delta_modularity_constrained(
double,
double,
GraphCSRView<int32_t, int32_t, double> const &,
rmm::device_vector<int32_t> const &,
rmm::device_vector<double> const &,
rmm::device_vector<double> &,
rmm::device_vector<int32_t> &,
rmm::device_vector<int32_t> &,
cudaStream_t);
template <typename vertex_t, typename edge_t, typename weight_t>
void leiden(GraphCSRView<vertex_t, edge_t, weight_t> const &graph,
weight_t &final_modularity,
int &num_level,
vertex_t *cluster_vec,
int max_level,
weight_t resolution,
cudaStream_t stream)
{
#ifdef TIMING
HighResTimer hr_timer;
#endif
num_level = 0;
//
// Vectors to create a copy of the graph
//
rmm::device_vector<edge_t> offsets_v(graph.offsets, graph.offsets + graph.number_of_vertices + 1);
rmm::device_vector<vertex_t> indices_v(graph.indices, graph.indices + graph.number_of_edges);
rmm::device_vector<weight_t> weights_v(graph.edge_data, graph.edge_data + graph.number_of_edges);
rmm::device_vector<vertex_t> src_indices_v(graph.number_of_edges);
//
// Weights and clustering across iterations of algorithm
//
rmm::device_vector<weight_t> vertex_weights_v(graph.number_of_vertices);
rmm::device_vector<weight_t> cluster_weights_v(graph.number_of_vertices);
rmm::device_vector<vertex_t> cluster_v(graph.number_of_vertices);
//
// Temporaries used within kernels. Each iteration uses less
// of this memory
//
rmm::device_vector<vertex_t> tmp_arr_v(graph.number_of_vertices);
rmm::device_vector<vertex_t> cluster_inverse_v(graph.number_of_vertices);
weight_t total_edge_weight =
thrust::reduce(rmm::exec_policy(stream)->on(stream), weights_v.begin(), weights_v.end());
weight_t best_modularity = -1;
//
// Initialize every cluster to reference each vertex to itself
//
thrust::sequence(rmm::exec_policy(stream)->on(stream), cluster_v.begin(), cluster_v.end());
thrust::copy(
rmm::exec_policy(stream)->on(stream), cluster_v.begin(), cluster_v.end(), cluster_vec);
//
// Our copy of the graph. Each iteration of the outer loop will
// shrink this copy of the graph.
//
GraphCSRView<vertex_t, edge_t, weight_t> current_graph(offsets_v.data().get(),
indices_v.data().get(),
weights_v.data().get(),
graph.number_of_vertices,
graph.number_of_edges);
current_graph.get_source_indices(src_indices_v.data().get());
while (num_level < max_level) {
//
// Sum the weights of all edges departing a vertex. This is
// loop invariant, so we'll compute it here.
//
// Cluster weights are equivalent to vertex weights with this initial
// graph
//
#ifdef TIMING
hr_timer.start("init");
#endif
cugraph::detail::compute_vertex_sums(current_graph, vertex_weights_v, stream);
thrust::copy(rmm::exec_policy(stream)->on(stream),
vertex_weights_v.begin(),
vertex_weights_v.end(),
cluster_weights_v.begin());
#ifdef TIMING
hr_timer.stop();
hr_timer.start("update_clustering");
#endif
weight_t new_Q = update_clustering_by_delta_modularity(total_edge_weight,
resolution,
current_graph,
src_indices_v,
vertex_weights_v,
cluster_weights_v,
cluster_v,
stream);
// After finding the initial unconstrained partition we use that partitioning as the constraint
// for the second round.
rmm::device_vector<vertex_t> constraint(graph.number_of_vertices);
thrust::copy(
rmm::exec_policy(stream)->on(stream), cluster_v.begin(), cluster_v.end(), constraint.begin());
new_Q = update_clustering_by_delta_modularity_constrained(total_edge_weight,
resolution,
current_graph,
src_indices_v,
vertex_weights_v,
cluster_weights_v,
cluster_v,
constraint,
stream);
#ifdef TIMING
hr_timer.stop();
#endif
if (new_Q <= best_modularity) { break; }
best_modularity = new_Q;
#ifdef TIMING
hr_timer.start("shrinking graph");
#endif
// renumber the clusters to the range 0..(num_clusters-1)
vertex_t num_clusters = renumber_clusters(
graph.number_of_vertices, cluster_v, tmp_arr_v, cluster_inverse_v, cluster_vec, stream);
cluster_weights_v.resize(num_clusters);
// shrink our graph to represent the graph of supervertices
generate_superverticies_graph(current_graph, src_indices_v, num_clusters, cluster_v, stream);
// assign each new vertex to its own cluster
thrust::sequence(rmm::exec_policy(stream)->on(stream), cluster_v.begin(), cluster_v.end());
#ifdef TIMING
hr_timer.stop();
#endif
num_level++;
}
#ifdef TIMING
hr_timer.display(std::cout);
#endif
final_modularity = best_modularity;
}
template void leiden(GraphCSRView<int32_t, int32_t, float> const &,
float &,
int &,
int32_t *,
int,
float,
cudaStream_t);
template void leiden(GraphCSRView<int32_t, int32_t, double> const &,
double &,
int &,
int32_t *,
int,
double,
cudaStream_t);
} // namespace detail
} // namespace cugraph
|
ec57f31138db13eb0ee1ede1a1392d7ae78a5cf7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Cardiff University | Computer Science
// Module: CM3203 One Semester Project (40 Credits)
// Title: Parallelisation of Matrix Exponentials in C++/CUDA for Quantum Control
// Date: 2016
//
// Author: Peter Davison
// Supervisor: Dr. Frank C Langbein
// Moderator: Dr. Irena Spasic
//
// Include header file
#include "CUDAMatrix.cuh"
// KERNELS
__global__ void cudaAdd(thrust::complex<double>* A, thrust::complex<double>* B, thrust::complex<double>* R, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < n && col < n) {
R[row * n + col] = A[row * n + col] + B[row * n + col];
}
__syncthreads();
}
__global__ void cudaAddScalar(thrust::complex<double>* A, thrust::complex<double> scalar, thrust::complex<double>* R, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < n && col < n) {
R[row * n + col] = A[row * n + col] + scalar;
}
__syncthreads();
}
__global__ void cudaSub(thrust::complex<double>* A, thrust::complex<double>* B, thrust::complex<double>* R, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < n && col < n) {
R[row * n + col] = A[row * n + col] - B[row * n + col];
}
__syncthreads();
}
__global__ void cudaSubScalar(thrust::complex<double>* A, thrust::complex<double> scalar, thrust::complex<double>* R, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < n && col < n) {
R[row * n + col] = A[row * n + col] - scalar;
}
__syncthreads();
}
__global__ void cudaMul(thrust::complex<double>* A, thrust::complex<double>* B, thrust::complex<double>* R, int n) {
thrust::complex<double> sum = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < n && col < n) {
for (int i = 0; i < n; i++) {
sum += A[row * n + i] * B[i * n + col];
}
		R[row * n + col] = sum;
	}
__syncthreads();
}
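// Note: cudaMul is a straightforward one-thread-per-output-element product with
// every operand read from global memory; a shared-memory tiled kernel or a
// cuBLAS/hipBLAS Zgemm call would be the usual optimisation for larger matrices.
// It also assumes R does not alias A or B - several call sites below multiply
// in place (e.g. R * R -> R when squaring in exp), which strictly needs a
// separate output buffer so threads never read elements already overwritten.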
__global__ void cudaMulScalar(thrust::complex<double>* A, thrust::complex<double> scalar, thrust::complex<double>* R, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < n && col < n) {
R[row * n + col] = A[row * n + col] * scalar;
}
__syncthreads();
}
__global__ void cudaAbs(thrust::complex<double>* A, thrust::complex<double>* R, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < n && col < n) {
R[row * n + col] = abs(A[row * n + col]);
}
__syncthreads();
}
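// The trailing __syncthreads() calls in the kernels above are not strictly
// required: no thread consumes a value produced by another thread within the
// same launch, so there is nothing to synchronise before the kernels return.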
// MEMORY HANDLERS
void CUDAMatrix::alloc() {
h_matrix = (std::complex<double>*) malloc(size);
hipError_t result = hipMalloc((void**) &d_matrix, size);
if (result != hipSuccess) {
throw std::runtime_error("Failed to allocate device memory");
}
}
void CUDAMatrix::dealloc() {
free(h_matrix);
hipError_t result = hipFree(d_matrix);
if (result != hipSuccess) {
throw std::runtime_error("Failed to free device memory");
}
}
// CUDA STUFF
void CUDAMatrix::syncHost() {
if (isInitialised()) {
hipError_t result = hipMemcpy(h_matrix, d_matrix, size, hipMemcpyDeviceToHost);
if (result != hipSuccess) {
throw std::runtime_error("Failed to allocate device memory");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
void CUDAMatrix::syncDevice() {
if (isInitialised()) {
hipError_t result = hipMemcpy(d_matrix, h_matrix, size, hipMemcpyHostToDevice);
if (result != hipSuccess) {
throw std::runtime_error("Failed to allocate device memory");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDAMatrix::cudaParams CUDAMatrix::getCUDAParams(int rows, int cols) {
cudaParams cp;
cp.tpb = dim3(rows, cols);
cp.bpg = dim3(1, 1);
if (rows*cols > 512) {
		// A single block may hold at most 1024 threads, so fall back to
		// 16x16 tiles and cover the matrix with a grid of blocks instead
		cp.tpb.x = 16;
		cp.tpb.y = 16;
		cp.bpg.x = (int) (ceil(double(cols) / double(cp.tpb.x)));
		cp.bpg.y = (int) (ceil(double(rows) / double(cp.tpb.y)));
}
return cp;
}
// INTERNAL PADE APPROXIMATION CODE
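// The functions below follow the scaling-and-squaring scheme: choose a Pade
// order m and a scaling s so that the [m/m] Pade approximant r_m is accurate
// for A / 2^s, then recover exp(A) as ( r_m(A / 2^s) )^(2^s).  The theta
// values in getPadeParams are the backward-error bounds for m = 3, 5, 7, 9, 13
// used by Higham's algorithm (and by MATLAB's expm), and getPadeCoefficients
// returns the corresponding polynomial coefficients (the same set serves
// numerator and denominator, split into odd terms for U and even terms for V).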
int CUDAMatrix::ell(CUDAMatrix& A, double coef, int m) {
CUDAMatrix sA(A.getNumRows());
CUDAMatrix::abs(A, sA);
double scale = ::pow(coef, (1 / (double) (2 * m + 1)));
CUDAMatrix::mul(sA, scale, sA);
//double alpha = sA.getNormAm(2 * m + 1) / A.getNorm(1); 2 LINES BELOW ARE TEMPORARY REPLACEMENT
CUDAMatrix::pow(sA, (2 * m + 1), sA);
double alpha = sA.getNorm(1) / (double) (A.getNorm(1));
/////
return utils::max((int) (ceil(log2(2 * alpha / std::numeric_limits<double>::epsilon()) / (2 * m))), 0);
}
CUDAMatrix::padeParams CUDAMatrix::getPadeParams(CUDAMatrix& A) {
// Init
double d4, d6, d8, d10, eta1, eta3, eta4, eta5;
int ar = A.getNumRows();
int ac = A.getNumCols();
std::vector<double> theta;
std::vector<double> coef;
// Init P;
padeParams p;
p.pow.resize(11);
p.scale = 0;
// Get coefficients and theta values
coef = {
(1 / 100800.0),
(1 / 10059033600.0),
(1 / 4487938430976000.0),
(1 / 5914384781877411840000.0),
(1 / 113250775606021113483283660800000000.0)
};
theta = {
1.495585217958292e-002,
2.539398330063230e-001,
9.504178996162932e-001,
2.097847961257068e+000,
5.371920351148152e+000
};
// Get powers of A
p.pow[2] = new CUDAMatrix(ar, ac);
p.pow[4] = new CUDAMatrix(ar, ac);
p.pow[6] = new CUDAMatrix(ar, ac);
p.pow[8] = new CUDAMatrix(ar, ac);
p.pow[10] = new CUDAMatrix(ar, ac);
cudaParams cp = getCUDAParams(A.getNumRows(), A.getNumCols());
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, A.d_matrix, p.pow[2]->d_matrix, ar);
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (p.pow[2]->d_matrix, p.pow[2]->d_matrix, p.pow[4]->d_matrix, ar);
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (p.pow[2]->d_matrix, p.pow[4]->d_matrix, p.pow[6]->d_matrix, ar);
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (p.pow[4]->d_matrix, p.pow[4]->d_matrix, p.pow[8]->d_matrix, ar);
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (p.pow[4]->d_matrix, p.pow[6]->d_matrix, p.pow[10]->d_matrix, ar);
	// NOT IDEAL: if getNorm were computed on the device, the host syncs below would be unnecessary
p.pow[2]->syncHost();
p.pow[4]->syncHost();
p.pow[6]->syncHost();
p.pow[8]->syncHost();
p.pow[10]->syncHost();
////
// Find mVal
d4 = ::pow(p.pow[4]->getNorm(1), (1.0 / 4));
d6 = ::pow(p.pow[6]->getNorm(1), (1.0 / 6));
eta1 = utils::max(d4, d6);
if ((eta1 <= theta[0]) && (ell(A, coef[0], 3) == 0)) {
p.mVal = 3;
return p;
}
if ((eta1 <= theta[1]) && (ell(A, coef[1], 5) == 0)) {
p.mVal = 5;
return p;
}
if (true) { //(A.isSmall()) {
d8 = ::pow(p.pow[8]->getNorm(1), (1.0 / 8));
} else {
//d8 = pow(p.pow[4]->getNormAm(2), (1.0 / 8));
}
eta3 = utils::max(d6, d8);
if ((eta3 <= theta[2]) && (ell(A, coef[2], 7) == 0)) {
p.mVal = 7;
return p;
}
if ((eta3 <= theta[3]) && (ell(A, coef[3], 9) == 0)) {
p.mVal = 9;
return p;
}
if (true) { //(A.isSmall()) {
d10 = ::pow(p.pow[10]->getNorm(1), (1.0 / 10));
} else {
//d10 = ::pow(p.pow[2]->getNormAm(5), (1.0 / 10));
}
// Find scaling factor
eta4 = utils::max(d8, d10);
eta5 = utils::min(eta3, eta4);
p.scale = utils::max((int) (ceil(log2(eta5 / theta[4]))), 0);
CUDAMatrix sA(ar, ac);
double multiplier = 1.0 / ::pow(2, p.scale);
CUDAMatrix::mul(A, multiplier, sA);
p.scale += ell(sA, coef[4], 13);
if (std::isinf((double) p.scale)) {
std::cout << "S = INF" << std::endl;
int exp; // THIS CODE IS NOT ERROR CHECKED!!!!!
double t = ::frexp(A.getNorm(1) / theta[4], &exp);
p.scale = exp - (t == 0.5);
}
p.mVal = 13;
return p;
}
std::vector<double> CUDAMatrix::getPadeCoefficients(int m) {
switch (m) {
case 3:
return { 120, 60, 12, 1 };
case 5:
return { 30240, 15120, 3360, 420, 30, 1 };
case 7:
return { 17297280, 8648640, 1995840, 277200, 25200, 1512, 56, 1 };
case 9:
return { 17643225600, 8821612800, 2075673600, 302702400, 30270240, 2162160, 110880, 3960, 90, 1 };
case 13:
return { 64764752532480000, 32382376266240000, 7771770303897600, 1187353796428800, 129060195264000, 10559470521600, 670442572800, 33522128640, 1323241920, 40840800, 960960, 16380, 182, 1 };
default:
throw std::runtime_error("Invalid m value");
}
}
// CONSTRUCTORS
CUDAMatrix::CUDAMatrix() {
initialised = false;
}
CUDAMatrix::CUDAMatrix(int inNumRowsCols) {
init(inNumRowsCols, inNumRowsCols);
setMatrix(0.0);
}
CUDAMatrix::CUDAMatrix(int inNumRows, int inNumCols) {
init(inNumRows, inNumCols);
setMatrix(0.0);
}
CUDAMatrix::CUDAMatrix(int inNumRowsCols, std::initializer_list<std::complex<double>> inMatrix) {
if (inMatrix.size() == inNumRowsCols*inNumRowsCols) {
init(inNumRowsCols, inNumRowsCols);
setMatrix(inMatrix);
} else {
throw std::runtime_error("Initialiser-list size does not match matrix size");
}
}
CUDAMatrix::CUDAMatrix(int inNumRows, int inNumCols, std::initializer_list<std::complex<double>> inMatrix) {
if (inMatrix.size() == inNumRows*inNumCols) {
init(inNumRows, inNumCols);
setMatrix(inMatrix);
} else {
throw std::runtime_error("Initialiser-list size does not match matrix size");
}
}
CUDAMatrix::CUDAMatrix(const CUDAMatrix &obj) {
if (obj.initialised) {
h_matrix = obj.h_matrix;
d_matrix = obj.d_matrix;
numRows = obj.numRows;
numCols = obj.numCols;
numEls = obj.numEls;
size = obj.size;
initialised = obj.initialised;
} else {
throw std::runtime_error("Cannot copy uninitialised matrix");
}
}
void CUDAMatrix::init(int inNumRows, int inNumCols) {
numRows = inNumRows;
numCols = inNumCols;
numEls = inNumRows*inNumCols;
size = sizeof(std::complex<double>) * numEls;
alloc();
initialised = true;
}
CUDAMatrix::~CUDAMatrix() {
dealloc();
}
// MATRIX OPERATIONS
CUDATimer CUDAMatrix::add(CUDAMatrix& A, CUDAMatrix& B, CUDAMatrix& R) {
if (A.isInitialised() && B.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int br = B.getNumRows();
int bc = B.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == br && br == bc && bc == rr && rr == rc) {
A.syncDevice();
B.syncDevice();
cudaParams cp = getCUDAParams(ar, ac);
CUDATimer t;
t.start();
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, B.d_matrix, R.d_matrix, A.getNumRows());
t.stop();
R.syncHost();
return t;
} else {
throw std::runtime_error("Matrix sizes do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDATimer CUDAMatrix::add(CUDAMatrix& A, std::complex<double> scalar, CUDAMatrix& R) {
if (A.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == rr && rr == rc) {
A.syncDevice();
cudaParams cp = getCUDAParams(ar, ac);
CUDATimer t;
t.start();
cudaAddScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, scalar, R.d_matrix, A.getNumRows());
t.stop();
R.syncHost();
return t;
} else {
throw std::runtime_error("Matrix sizes do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDATimer CUDAMatrix::sub(CUDAMatrix& A, CUDAMatrix& B, CUDAMatrix& R) {
if (A.isInitialised() && B.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int br = B.getNumRows();
int bc = B.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == br && br == bc && bc == rr && rr == rc) {
A.syncDevice();
B.syncDevice();
cudaParams cp = getCUDAParams(ar, ac);
CUDATimer t;
t.start();
cudaSub KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, B.d_matrix, R.d_matrix, A.getNumRows());
t.stop();
R.syncHost();
return t;
} else {
throw std::runtime_error("Matrix sizes do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDATimer CUDAMatrix::sub(CUDAMatrix& A, std::complex<double> scalar, CUDAMatrix& R) {
if (A.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == rr && rr == rc) {
A.syncDevice();
cudaParams cp = getCUDAParams(ar, ac);
CUDATimer t;
t.start();
cudaSubScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, scalar, R.d_matrix, A.getNumRows());
t.stop();
R.syncHost();
return t;
} else {
throw std::runtime_error("Matrix sizes do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDATimer CUDAMatrix::mul(CUDAMatrix& A, CUDAMatrix& B, CUDAMatrix& R) {
if (A.isInitialised() && B.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int br = B.getNumRows();
int bc = B.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == br && br == bc && bc == rr && rr == rc) {
A.syncDevice();
B.syncDevice();
cudaParams cp = getCUDAParams(ar, ac);
CUDATimer t;
t.start();
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, B.d_matrix, R.d_matrix, A.getNumRows());
t.stop();
R.syncHost();
return t;
} else {
throw std::runtime_error("Matrix sizes do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDATimer CUDAMatrix::mul(CUDAMatrix& A, std::complex<double> scalar, CUDAMatrix& R) {
if (A.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == rr && rr == rc) {
A.syncDevice();
cudaParams cp = getCUDAParams(ar, ac);
CUDATimer t;
t.start();
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, scalar, R.d_matrix, A.getNumRows());
t.stop();
R.syncHost();
return t;
} else {
throw std::runtime_error("Matrix sizes do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDATimer CUDAMatrix::pow(CUDAMatrix& A, int pow, CUDAMatrix& R) {
if (A.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == rr && rr == rc) {
A.syncDevice();
CUDAMatrix T(ar);
T.setIdentity();
T.syncDevice();
cudaParams cp = getCUDAParams(ar, ac);
CUDATimer t;
t.start();
for (int c1 = 0; c1 < pow; c1++) {
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, T.d_matrix, T.d_matrix, ar);
}
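			// pow successive multiplications are launched here; square-and-multiply
			// (binary exponentiation) would reduce this to O(log2(pow)) launches
			// for large exponents.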
t.stop();
T.syncHost();
R.setMatrix(T.getMatrix());
return t;
} else {
throw std::runtime_error("Matrix sizes do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDATimer CUDAMatrix::inv(CUDAMatrix& A, CUDAMatrix& R) {
if (A.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == rr && rr == rc) {
CUDATimer t;
CUDAMatrix L = CUDAMatrix(ar, ac);
CUDAMatrix U = CUDAMatrix(ar, ac);
CUDAMatrix Z = CUDAMatrix(ar, ac);
CUDAMatrix I = CUDAMatrix(ar, ac);
I.setIdentity();
t.start();
int n = ar;
int i, j, k;
// LU Decomposition
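			// Doolittle-style factorisation A = L * U with a unit diagonal on L.
			// No pivoting is performed, so every leading pivot U(i,i) is assumed
			// to be non-zero; a singular or badly ordered matrix will divide by
			// zero in the substitution steps below.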
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
if (j < i) {
U.setCell(i, j, 0);
} else {
U.setCell(i, j, A.getCell(i, j));
for (k = 0; k < i; k++) {
U.setCell(i, j, (U.getCell(i, j) - U.getCell(k, j) * L.getCell(i, k)));
}
}
}
for (j = 0; j < n; j++) {
if (j < i) {
L.setCell(j, i, 0);
} else if (j == i) {
L.setCell(j, i, 1);
} else {
L.setCell(j, i, (A.getCell(j, i) / U.getCell(i, i)));
for (k = 0; k < i; k++) {
L.setCell(j, i, (L.getCell(j, i) - ((U.getCell(k, i) * L.getCell(j, k)) / U.getCell(i, i))));
}
}
}
}
for (i = 0; i < n; i++) {
// Find Z (L^-1) with Forward Substitution
for (j = 0; j < n; j++) {
Z.setCell(j, i, I.getCell(j, i));
for (k = 0; k < n; k++) {
if (k != j) {
Z.setCell(j, i, (Z.getCell(j, i) - (L.getCell(j, k) * Z.getCell(k, i))));
}
}
}
// Find X (A^-1) with Backward Substitution
for (j = n - 1; j >= 0; j--) {
R.setCell(j, i, Z.getCell(j, i));
for (k = 0; k < n; k++) {
if (k != j) {
R.setCell(j, i, (R.getCell(j, i) - (U.getCell(j, k) * R.getCell(k, i))));
}
}
R.setCell(j, i, R.getCell(j, i) / U.getCell(j, j));
}
}
t.stop();
return t;
} else {
throw std::runtime_error("Matrix sizes do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDATimer CUDAMatrix::tra(CUDAMatrix& A, CUDAMatrix& R) {
if (A.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ac == rr) {
A.syncDevice();
int c1, c2;
CUDATimer t;
t.start();
for (c1 = 0; c1 < A.getNumRows(); c1++) {
for (c2 = 0; c2 < A.getNumCols(); c2++) {
R.setCell(c1, c2, A.getCell(c2, c1));
}
}
t.stop();
R.syncDevice();
return t;
} else {
throw std::runtime_error("Transpose matrix is the wrong size");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDATimer CUDAMatrix::exp(CUDAMatrix& A, CUDAMatrix& R) {
if (A.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == rr && rr == rc) {
A.syncDevice();
CUDATimer t;
int c1, c2;
int n = utils::max(ar, ac);
// Special Cases
if (A.isDiagonal()) {
t.start();
for (c1 = 0; c1 < n; c1++) {
R.setCell(c1, c1, ::exp(A.getCell(c1, c1)));
}
t.stop();
R.syncDevice();
} else if (A.isZero()) {
t.start();
				R.setIdentity();	// expm of the zero matrix is the identity matrix
t.stop();
R.syncDevice();
// Normal Case
} else {
// Create Matrices
CUDAMatrix U(ar, ac);
CUDAMatrix V(ar, ac);
CUDAMatrix I(ar, ac); // Identity
CUDAMatrix T(ar, ac); // Tally
CUDAMatrix TMP(ar, ac); // Temporary
I.setIdentity();
I.syncDevice();
// Get CUDA params
cudaParams cp = getCUDAParams(ar, ac);
// Get Pade params
padeParams p = getPadeParams(A);
int s = p.scale;
int m = p.mVal;
std::vector<CUDAMatrix*> pow = p.pow;
// Get Pade coefficients
std::vector<double> c = getPadeCoefficients(m);
// Start timer
t.start();
// Scaling
if (s != 0) {
double multiplier;
multiplier = 1.0 / ::pow(2, s);
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, multiplier, A.d_matrix, n);
for (c1 = 2; c1 <= 6; c1 += 2) {
multiplier = 1.0 / ::pow(2, (s * c1));
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[c1]->d_matrix, multiplier, pow[c1]->d_matrix, n);
}
}
// Approximation
if (m == 3 || m == 5 || m == 7 || m == 9) {
for (c1 = (int) (pow.size()) + 2; c1 < m - 1; c1 += 2) { //for (k = strt:2:m-1)
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[c1 - 2]->d_matrix, pow[2]->d_matrix, pow[c1]->d_matrix, n);
}
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (I.d_matrix, c[1], U.d_matrix, n);
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (I.d_matrix, c[0], V.d_matrix, n);
for (c2 = m; c2 >= 3; c2 -= 2) { //for (j = m : -2 : 3)
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[c2 - 1]->d_matrix, c[c2], TMP.d_matrix, n);
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (U.d_matrix, TMP.d_matrix, U.d_matrix, n);
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[c2 - 1]->d_matrix, c[c2-1], TMP.d_matrix, n);
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (V.d_matrix, TMP.d_matrix, V.d_matrix, n);
}
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (U.d_matrix, A.d_matrix, U.d_matrix, n);
} else if (m == 13) {
					// This is the equivalent of ..
// U = A * (p[6] * (c[13] * p[6] + c[11] * p[4] + c[9] * p[2]) + c[7] * p[6] + c[5] * p[4] + c[3] * p[2] + c[1] * I); RUN IN STREAM 1
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[6]->d_matrix, c[13], T.d_matrix, n); // p[6] * c[13] -> T Needs new TMP var
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[4]->d_matrix, c[11], TMP.d_matrix, n); // p[4] * c[11] -> TMP (Cannot be used in multiple streams)
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[2]->d_matrix, c[9], TMP.d_matrix, n); // p[2] * c[9] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[6]->d_matrix, T.d_matrix, T.d_matrix, n); // p[6] * T -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[6]->d_matrix, c[7], TMP.d_matrix, n); // p[6] * c[7] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[4]->d_matrix, c[5], TMP.d_matrix, n); // p[4] * c[5] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[2]->d_matrix, c[3], TMP.d_matrix, n); // p[2] * c[3] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (I.d_matrix, c[1], TMP.d_matrix, n); // I * c[1] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, T.d_matrix, U.d_matrix, n); // A * T -> U
					// This is the equivalent of ..
//V = p[6] * (c[12] * p[6] + c[10] * p[4] + c[8] * p[2]) + c[6] * p[6] + c[4] * p[4] + c[2] * p[2] + c[0] * I; RUN IN STREAM 2
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[6]->d_matrix, c[12], T.d_matrix, n); // p[6] * c[12] -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[4]->d_matrix, c[10], TMP.d_matrix, n); // p[4] * c[10] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[2]->d_matrix, c[8], TMP.d_matrix, n); // p[2] * c[8] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
					cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[6]->d_matrix, T.d_matrix, T.d_matrix, n);						// p[6] * T -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[6]->d_matrix, c[6], TMP.d_matrix, n); // p[6] * c[6] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[4]->d_matrix, c[4], TMP.d_matrix, n); // p[4] * c[4] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[2]->d_matrix, c[2], TMP.d_matrix, n); // p[2] * c[2] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (I.d_matrix, c[0], TMP.d_matrix, n); // I * c[0] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, V.d_matrix, n); // T + TMP -> V
}
			// This is the equivalent of ..
			// R = inv(V - U) * (2 * U) + I, i.e. the Pade quotient inv(V - U) * (V + U)
cudaSub KERNEL_ARGS2(cp.bpg, cp.tpb) (V.d_matrix, U.d_matrix, T.d_matrix, n);
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (U.d_matrix, 2, TMP.d_matrix, n);
//cudaInv KERNEL_ARGS2(cp.bpg, cp.tpb) (TMP.d_matrix, TMP.d_matrix, n); // TEMP CODE BELOW
T.syncHost();
CUDAMatrix::inv(T, T);
T.syncDevice();
//
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n);
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, I.d_matrix, R.d_matrix, n);
// Squaring
for (int k = 0; k < s; k++) {
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (R.d_matrix, R.d_matrix, R.d_matrix, n);
}
hipDeviceSynchronize();
t.stop();
R.syncHost();
}
return t;
} else {
throw std::runtime_error("Matrix sizez do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
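// A minimal usage sketch of the exponential above (matrix values purely illustrative):
//
//   CUDAMatrix A(3, { 1.0, 0.0, 0.0,
//                     0.0, 2.0, 0.0,
//                     0.0, 0.0, 3.0 });
//   CUDAMatrix R(3);
//   CUDATimer t = CUDAMatrix::exp(A, R);   // R now holds expm(A)
//   std::cout << R << std::endl;           // printed via the operator<< defined below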
CUDATimer CUDAMatrix::abs(CUDAMatrix& A, CUDAMatrix& R) {
if (A.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == rr && rr == rc) {
A.syncDevice();
cudaParams cp = getCUDAParams(ar, ac);
CUDATimer t;
t.start();
cudaAbs KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, R.d_matrix, ar);
t.stop();
R.syncHost();
return t;
} else {
throw std::runtime_error("Matrix sizes do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
// BOOLEANS
bool CUDAMatrix::isInitialised() {
return initialised;
}
bool CUDAMatrix::isSquare() {
if (initialised) {
if (numCols == numRows) {
return true;
} else {
return false;
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
bool CUDAMatrix::isDiagonal() {
if (initialised) {
if (!isSquare()) {
return false;
}
for (int c1 = 0; c1 < numRows; c1++) {
for (int c2 = 0; c2 < numCols; c2++) {
if (c1 != c2 && getCell(c1, c2) != 0.0) {
return false;
}
}
}
return true;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
bool CUDAMatrix::isIdentity() {
if (initialised) {
for (int c1 = 0; c1 < numRows; c1++) {
for (int c2 = 0; c2 < numCols; c2++) {
if ((c1 != c2 && getCell(c1, c2) != 0.0) || (c1 == c2 && getCell(c1, c2) != 1.0)) {
return false;
}
}
}
return true;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
bool CUDAMatrix::isZero() {
if (initialised) {
for (int c1 = 0; c1 < numRows; c1++) {
for (int c2 = 0; c2 < numCols; c2++) {
if (getCell(c1, c2) != 0.0) {
return false;
}
}
}
return true;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
bool CUDAMatrix::isSmall() {
return utils::max(numRows, numCols) < 150;
}
bool CUDAMatrix::isComplex() {
std::complex<double> cell;
for (int c1 = 0; c1 < numEls; c1++) {
cell = getCell(c1);
if (cell.imag() != 0.0) {
return true;
}
}
return false;
}
// SETTERS
void CUDAMatrix::setCell(int row, int col, std::complex<double> val) {
if (isInitialised()) {
h_matrix[numCols * row + col] = val;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
void CUDAMatrix::setCell(int i, std::complex<double> val) {
if (isInitialised()) {
h_matrix[i] = val;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
void CUDAMatrix::setMatrix(std::complex<double> val) {
if (isInitialised()) {
for (int c1 = 0; c1 < getNumEls(); c1++) {
h_matrix[c1] = val;
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
void CUDAMatrix::setMatrix(std::complex<double>* inMatrix) {
if (isInitialised()) {
for (int c1 = 0; c1 < numEls; c1++) {
h_matrix[c1] = inMatrix[c1];
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
void CUDAMatrix::setMatrix(std::initializer_list<std::complex<double>> inMatrix) {
if (isInitialised()) {
if (inMatrix.size() == getNumEls()) {
std::copy(inMatrix.begin(), inMatrix.end(), h_matrix);
} else {
throw std::runtime_error("Initialiser-list size does not match matrix size");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
void CUDAMatrix::setIdentity() {
if (isInitialised()) {
int row, col;
for (int c1 = 0; c1 < getNumEls(); c1++) {
row = getCurRow(c1);
col = getCurCol(c1);
if (row == col) {
h_matrix[c1] = 1;
} else {
h_matrix[c1] = 0;
}
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
void CUDAMatrix::setRandomDouble(double min, double max) {
if (isInitialised()) {
double r;
std::default_random_engine rng((unsigned int) (time(0)));
std::uniform_real_distribution<double> gen(min, max);
for (int c1 = 0; c1 < numEls; c1++) {
r = gen(rng);
setCell(c1, r);
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
void CUDAMatrix::setRandomInt(int min, int max) {
if (isInitialised()) {
int r;
std::default_random_engine rng((unsigned int) (time(0)));
std::uniform_int_distribution<int> gen(min, max);
for (int c1 = 0; c1 < numEls; c1++) {
r = gen(rng);
setCell(c1, r);
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
// GETTERS
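// getNorm(1) returns the maximum absolute column sum and getNorm(INFINITY)
// the maximum absolute row sum; any other n falls through to the stubbed-out
// Euclidean branch and returns -1.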
double CUDAMatrix::getNorm(int n) {
int c1, c2;
double sum, max = 0;
if (n == 1) {
// 1 Norm
for (c1 = 0; c1 < numCols; c1++) {
sum = 0;
for (c2 = 0; c2 < numRows; c2++) {
sum += std::abs(getCell(c2, c1));
}
if (std::norm(sum) > std::norm(max)) {
max = sum;
}
}
return max;
} else if (n == INFINITY) {
// Inf Norm
for (c1 = 0; c1 < numRows; c1++) {
sum = 0;
for (c2 = 0; c2 < numCols; c2++) {
				sum += std::abs(getCell(c1, c2));
}
if (std::norm(sum) > std::norm(max)) {
max = sum;
}
}
return max;
} else {
//// Euclidian Not called from anywhere. Requires SVD implementation to work.
//sum = 0;
//for (c1 = 0; c1 < numEls; c1++) {
// sum += ::pow(getCell(c1), n);
//}
//return ::pow(sum, 1.0 / n);
return -1;
}
}
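// Row-major index helpers: linear element i maps to row i / numCols and
// column i % numCols.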
int CUDAMatrix::getCurRow(int i) {
if (isInitialised()) {
return (int) (floor(i / numCols));
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
int CUDAMatrix::getCurCol(int i) {
if (isInitialised()) {
return (int) (i - (numCols*getCurRow(i)));
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
std::complex<double> CUDAMatrix::getCell(int row, int col) {
if (isInitialised()) {
return h_matrix[row*numCols + col];
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
std::complex<double> CUDAMatrix::getCell(int i) {
if (isInitialised()) {
return h_matrix[i];
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
std::complex<double>* CUDAMatrix::getMatrix() {
if (isInitialised()) {
return h_matrix;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
int CUDAMatrix::getNumRows() {
if (isInitialised()) {
return numRows;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
int CUDAMatrix::getNumCols() {
if (isInitialised()) {
return numCols;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
int CUDAMatrix::getNumEls() {
if (isInitialised()) {
return numEls;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
size_t CUDAMatrix::getSize() {
if (isInitialised()) {
return size;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
// UTILS
int utils::getNumDigits(double x) {
if (x > 1.0 || x < -1.0) {
return (int) (floor(log10(abs(x))) + 1);
}
return 1;
}
int utils::max(int x, int y) {
if (x > y) {
return x;
} else {
return y;
}
}
double utils::max(double x, double y) {
if (x > y) {
return x;
} else {
return y;
}
}
int utils::min(int x, int y) {
if (x < y) {
return x;
} else {
return y;
}
}
double utils::min(double x, double y) {
if (x < y) {
return x;
} else {
return y;
}
}
// OPERATOR OVERRIDES
std::ostream& operator<<(std::ostream& oStream, CUDAMatrix& A) {
if (A.isInitialised()) {
// Init
std::complex<double> cell;
bool isComplex = A.isComplex();
bool scientific = false;
int c1, c2, r, i;
int realLength = 0, imagLength = 0, exp = 0;
double divider;
int precision = 0;
int maxFixedDigits = 4;
// Get info
for (c1 = 0; c1 < A.getNumEls(); c1++) {
cell = A.getCell(c1);
// Check if it's decimal
if ((cell.real() - (int) (cell.real())) != 0.0 ||
(cell.imag() - (int) (cell.imag())) != 0.0) {
precision = 4;
}
// Get maximum exponent
r = utils::getNumDigits(cell.real());
i = utils::getNumDigits(cell.imag());
if (r - 1 > exp) {
exp = r - 1;
realLength = r;
}
if (i - 1 > exp) {
exp = i - 1;
imagLength = i;
				if (std::abs(cell.imag()) == 1.0) {
imagLength++;
}
}
}
// Check if the output should be in fixed or scientific form
if (exp >= maxFixedDigits) {
scientific = true;
}
// Get divider for scientific form
divider = ::pow(10, exp);
// Output name and multiplier
oStream << " = ";
if (scientific) {
oStream << "(10 ^ " << exp << ") *";
}
// Output cell
oStream << std::endl << std::setprecision(precision) << std::fixed;
for (c1 = 0; c1 < A.getNumEls(); c1++) {
cell = A.getCell(c1);
oStream << "| ";
// Spacing and formatting for scientific/fixed
if (scientific) {
cell /= divider;
} else {
r = utils::getNumDigits(cell.real());
for (c2 = 0; c2 < (realLength - r); c2++) {
oStream << " ";
}
}
// Output real
oStream << cell.real() << " ";
// Output complex
if (isComplex) {
if (cell.imag() != 0.0) {
if (cell.imag() > 0.0) {
oStream << "+ ";
} else {
oStream << "- ";
}
if (abs(cell.imag()) != 1.0) {
oStream << std::abs(cell.imag());
} else {
oStream << " ";
}
oStream << "i ";
} else {
i = utils::getNumDigits(cell.imag());
for (c2 = 0; c2 < imagLength + 3; c2++) {
oStream << " ";
}
}
}
// Output new line if row end reached
if (A.getCurRow(c1 + 1) > A.getCurRow(c1)) {
oStream << "|";
if (A.getCurRow(c1 + 1) < A.getNumRows()) {
oStream << std::endl;
}
}
}
oStream << std::endl;
return oStream;
// // Get precision
// cell = A.getCell(c1);
// if ((cell - (int) (cell)) != 0.0) {
// precision = 5;
// }
// // Get maximum number length
// length = utils::getNumDigits(cell);
// if (length > maxLength) {
// maxLength = length;
// }
//}
//for (c1 = 0; c1 < A.getNumEls(); c1++) {
// cell = A.getCell(c1);
// // Remove negative zeros
// if (cell == 0.0) {
// cell = 0;
// }
// oStream << "| ";
// // Add whitespace if shorter than maxLength
// length = utils::getNumDigits(cell);
// for (c2 = 0; c2 < (maxLength - length); c2++) {
// oStream << " ";
// }
// // Output number
// oStream << std::setprecision(precision) << std::fixed << cell << " ";
// // Output new line if row end reached
// if (A.getCurRow(c1 + 1) > A.getCurRow(c1)) {
// oStream << "|";
// if (A.getCurRow(c1 + 1) < A.getNumRows()) {
// oStream << std::endl;
// }
// }
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
} | ec57f31138db13eb0ee1ede1a1392d7ae78a5cf7.cu | //
// Cardiff University | Computer Science
// Module: CM3203 One Semester Project (40 Credits)
// Title: Parallelisation of Matrix Exponentials in C++/CUDA for Quantum Control
// Date: 2016
//
// Author: Peter Davison
// Supervisor: Dr. Frank C Langbein
// Moderator: Dr. Irena Spasic
//
// Include header file
#include "CUDAMatrix.cuh"
// KERNELS
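// Device kernels: each thread computes one element of an n x n result. The
// row/col indices come from the 2-D grid and block and are checked against n
// before any global memory access.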
__global__ void cudaAdd(thrust::complex<double>* A, thrust::complex<double>* B, thrust::complex<double>* R, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < n && col < n) {
R[row * n + col] = A[row * n + col] + B[row * n + col];
}
__syncthreads();
}
__global__ void cudaAddScalar(thrust::complex<double>* A, thrust::complex<double> scalar, thrust::complex<double>* R, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < n && col < n) {
R[row * n + col] = A[row * n + col] + scalar;
}
__syncthreads();
}
__global__ void cudaSub(thrust::complex<double>* A, thrust::complex<double>* B, thrust::complex<double>* R, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < n && col < n) {
R[row * n + col] = A[row * n + col] - B[row * n + col];
}
__syncthreads();
}
__global__ void cudaSubScalar(thrust::complex<double>* A, thrust::complex<double> scalar, thrust::complex<double>* R, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < n && col < n) {
R[row * n + col] = A[row * n + col] - scalar;
}
__syncthreads();
}
__global__ void cudaMul(thrust::complex<double>* A, thrust::complex<double>* B, thrust::complex<double>* R, int n) {
thrust::complex<double> sum = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
	if (row < n && col < n) {
		for (int i = 0; i < n; i++) {
			sum += A[row * n + i] * B[i * n + col];
		}
		// Write inside the bounds check so out-of-range threads never touch R.
		R[row * n + col] = sum;
	}
__syncthreads();
}
__global__ void cudaMulScalar(thrust::complex<double>* A, thrust::complex<double> scalar, thrust::complex<double>* R, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < n && col < n) {
R[row * n + col] = A[row * n + col] * scalar;
}
__syncthreads();
}
__global__ void cudaAbs(thrust::complex<double>* A, thrust::complex<double>* R, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < n && col < n) {
R[row * n + col] = abs(A[row * n + col]);
}
__syncthreads();
}
// MEMORY HANDLERS
void CUDAMatrix::alloc() {
h_matrix = (std::complex<double>*) malloc(size);
cudaError_t result = cudaMalloc((void**) &d_matrix, size);
if (result != cudaSuccess) {
throw std::runtime_error("Failed to allocate device memory");
}
}
void CUDAMatrix::dealloc() {
free(h_matrix);
cudaError_t result = cudaFree(d_matrix);
if (result != cudaSuccess) {
throw std::runtime_error("Failed to free device memory");
}
}
// CUDA STUFF
void CUDAMatrix::syncHost() {
if (isInitialised()) {
cudaError_t result = cudaMemcpy(h_matrix, d_matrix, size, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
throw std::runtime_error("Failed to allocate device memory");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
void CUDAMatrix::syncDevice() {
if (isInitialised()) {
cudaError_t result = cudaMemcpy(d_matrix, h_matrix, size, cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
throw std::runtime_error("Failed to allocate device memory");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDAMatrix::cudaParams CUDAMatrix::getCUDAParams(int rows, int cols) {
cudaParams cp;
cp.tpb = dim3(rows, cols);
cp.bpg = dim3(1, 1);
	if (rows*cols > 512) {
		// A 512x512 block exceeds the per-block thread limit; use a 16x16
		// block and grow the grid to cover the matrix instead.
		cp.tpb.x = 16;
		cp.tpb.y = 16;
		cp.bpg.x = (int) (ceil(double(rows) / double(cp.tpb.x)));
		cp.bpg.y = (int) (ceil(double(cols) / double(cp.tpb.y)));
	}
return cp;
}
// INTERNAL PADE APPROXIMATION CODE
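// The routines below port a MATLAB-style scaling-and-squaring Pade
// approximation of the matrix exponential; ell() estimates the extra scaling
// needed for a given Pade order m from 1-norms of |A| and its powers.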
int CUDAMatrix::ell(CUDAMatrix& A, double coef, int m) {
CUDAMatrix sA(A.getNumRows());
CUDAMatrix::abs(A, sA);
double scale = std::pow(coef, (1 / (double) (2 * m + 1)));
CUDAMatrix::mul(sA, scale, sA);
//double alpha = sA.getNormAm(2 * m + 1) / A.getNorm(1); 2 LINES BELOW ARE TEMPORARY REPLACEMENT
CUDAMatrix::pow(sA, (2 * m + 1), sA);
double alpha = sA.getNorm(1) / (double) (A.getNorm(1));
/////
return utils::max((int) (ceil(log2(2 * alpha / std::numeric_limits<double>::epsilon()) / (2 * m))), 0);
}
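// getPadeParams precomputes the even powers of A and picks the smallest Pade
// order m in {3, 5, 7, 9, 13} whose theta bound is met; otherwise it falls
// back to m = 13 together with a power-of-two scaling factor p.scale.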
CUDAMatrix::padeParams CUDAMatrix::getPadeParams(CUDAMatrix& A) {
// Init
double d4, d6, d8, d10, eta1, eta3, eta4, eta5;
int ar = A.getNumRows();
int ac = A.getNumCols();
std::vector<double> theta;
std::vector<double> coef;
// Init P;
padeParams p;
p.pow.resize(11);
p.scale = 0;
// Get coefficients and theta values
coef = {
(1 / 100800.0),
(1 / 10059033600.0),
(1 / 4487938430976000.0),
(1 / 5914384781877411840000.0),
(1 / 113250775606021113483283660800000000.0)
};
theta = {
1.495585217958292e-002,
2.539398330063230e-001,
9.504178996162932e-001,
2.097847961257068e+000,
5.371920351148152e+000
};
// Get powers of A
p.pow[2] = new CUDAMatrix(ar, ac);
p.pow[4] = new CUDAMatrix(ar, ac);
p.pow[6] = new CUDAMatrix(ar, ac);
p.pow[8] = new CUDAMatrix(ar, ac);
p.pow[10] = new CUDAMatrix(ar, ac);
cudaParams cp = getCUDAParams(A.getNumRows(), A.getNumCols());
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, A.d_matrix, p.pow[2]->d_matrix, ar);
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (p.pow[2]->d_matrix, p.pow[2]->d_matrix, p.pow[4]->d_matrix, ar);
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (p.pow[2]->d_matrix, p.pow[4]->d_matrix, p.pow[6]->d_matrix, ar);
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (p.pow[4]->d_matrix, p.pow[4]->d_matrix, p.pow[8]->d_matrix, ar);
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (p.pow[4]->d_matrix, p.pow[6]->d_matrix, p.pow[10]->d_matrix, ar);
	// Not ideal: getNorm runs on the host, so each power has to be synced back.
	// Computing the norms on the device would make this syncing unnecessary.
p.pow[2]->syncHost();
p.pow[4]->syncHost();
p.pow[6]->syncHost();
p.pow[8]->syncHost();
p.pow[10]->syncHost();
////
// Find mVal
d4 = std::pow(p.pow[4]->getNorm(1), (1.0 / 4));
d6 = std::pow(p.pow[6]->getNorm(1), (1.0 / 6));
eta1 = utils::max(d4, d6);
if ((eta1 <= theta[0]) && (ell(A, coef[0], 3) == 0)) {
p.mVal = 3;
return p;
}
if ((eta1 <= theta[1]) && (ell(A, coef[1], 5) == 0)) {
p.mVal = 5;
return p;
}
if (true) { //(A.isSmall()) {
d8 = std::pow(p.pow[8]->getNorm(1), (1.0 / 8));
} else {
//d8 = pow(p.pow[4]->getNormAm(2), (1.0 / 8));
}
eta3 = utils::max(d6, d8);
if ((eta3 <= theta[2]) && (ell(A, coef[2], 7) == 0)) {
p.mVal = 7;
return p;
}
if ((eta3 <= theta[3]) && (ell(A, coef[3], 9) == 0)) {
p.mVal = 9;
return p;
}
if (true) { //(A.isSmall()) {
d10 = std::pow(p.pow[10]->getNorm(1), (1.0 / 10));
} else {
//d10 = std::pow(p.pow[2]->getNormAm(5), (1.0 / 10));
}
// Find scaling factor
eta4 = utils::max(d8, d10);
eta5 = utils::min(eta3, eta4);
p.scale = utils::max((int) (ceil(log2(eta5 / theta[4]))), 0);
CUDAMatrix sA(ar, ac);
double multiplier = 1.0 / std::pow(2, p.scale);
CUDAMatrix::mul(A, multiplier, sA);
p.scale += ell(sA, coef[4], 13);
if (std::isinf((double) p.scale)) {
std::cout << "S = INF" << std::endl;
int exp; // THIS CODE IS NOT ERROR CHECKED!!!!!
double t = std::frexp(A.getNorm(1) / theta[4], &exp);
p.scale = exp - (t == 0.5);
}
p.mVal = 13;
return p;
}
std::vector<double> CUDAMatrix::getPadeCoefficients(int m) {
switch (m) {
case 3:
return { 120, 60, 12, 1 };
case 5:
return { 30240, 15120, 3360, 420, 30, 1 };
case 7:
return { 17297280, 8648640, 1995840, 277200, 25200, 1512, 56, 1 };
case 9:
return { 17643225600, 8821612800, 2075673600, 302702400, 30270240, 2162160, 110880, 3960, 90, 1 };
case 13:
return { 64764752532480000, 32382376266240000, 7771770303897600, 1187353796428800, 129060195264000, 10559470521600, 670442572800, 33522128640, 1323241920, 40840800, 960960, 16380, 182, 1 };
default:
throw std::runtime_error("Invalid m value");
}
}
// CONSTRUCTORS
CUDAMatrix::CUDAMatrix() {
initialised = false;
}
CUDAMatrix::CUDAMatrix(int inNumRowsCols) {
init(inNumRowsCols, inNumRowsCols);
setMatrix(0.0);
}
CUDAMatrix::CUDAMatrix(int inNumRows, int inNumCols) {
init(inNumRows, inNumCols);
setMatrix(0.0);
}
CUDAMatrix::CUDAMatrix(int inNumRowsCols, std::initializer_list<std::complex<double>> inMatrix) {
if (inMatrix.size() == inNumRowsCols*inNumRowsCols) {
init(inNumRowsCols, inNumRowsCols);
setMatrix(inMatrix);
} else {
throw std::runtime_error("Initialiser-list size does not match matrix size");
}
}
CUDAMatrix::CUDAMatrix(int inNumRows, int inNumCols, std::initializer_list<std::complex<double>> inMatrix) {
if (inMatrix.size() == inNumRows*inNumCols) {
init(inNumRows, inNumCols);
setMatrix(inMatrix);
} else {
throw std::runtime_error("Initialiser-list size does not match matrix size");
}
}
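// Note: the copy constructor copies the raw pointers, so the copy shares its
// h_matrix and d_matrix buffers with the original rather than owning them.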
CUDAMatrix::CUDAMatrix(const CUDAMatrix &obj) {
if (obj.initialised) {
h_matrix = obj.h_matrix;
d_matrix = obj.d_matrix;
numRows = obj.numRows;
numCols = obj.numCols;
numEls = obj.numEls;
size = obj.size;
initialised = obj.initialised;
} else {
throw std::runtime_error("Cannot copy uninitialised matrix");
}
}
void CUDAMatrix::init(int inNumRows, int inNumCols) {
numRows = inNumRows;
numCols = inNumCols;
numEls = inNumRows*inNumCols;
size = sizeof(std::complex<double>) * numEls;
alloc();
initialised = true;
}
CUDAMatrix::~CUDAMatrix() {
dealloc();
}
// MATRIX OPERATIONS
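// Most of the static operations below follow the same pattern: validate the
// operand sizes, sync the inputs to the device, time the kernel launch with a
// CUDATimer, and sync the result back to the host before returning.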
CUDATimer CUDAMatrix::add(CUDAMatrix& A, CUDAMatrix& B, CUDAMatrix& R) {
if (A.isInitialised() && B.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int br = B.getNumRows();
int bc = B.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == br && br == bc && bc == rr && rr == rc) {
A.syncDevice();
B.syncDevice();
cudaParams cp = getCUDAParams(ar, ac);
CUDATimer t;
t.start();
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, B.d_matrix, R.d_matrix, A.getNumRows());
t.stop();
R.syncHost();
return t;
} else {
throw std::runtime_error("Matrix sizes do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDATimer CUDAMatrix::add(CUDAMatrix& A, std::complex<double> scalar, CUDAMatrix& R) {
if (A.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == rr && rr == rc) {
A.syncDevice();
cudaParams cp = getCUDAParams(ar, ac);
CUDATimer t;
t.start();
cudaAddScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, scalar, R.d_matrix, A.getNumRows());
t.stop();
R.syncHost();
return t;
} else {
throw std::runtime_error("Matrix sizes do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDATimer CUDAMatrix::sub(CUDAMatrix& A, CUDAMatrix& B, CUDAMatrix& R) {
if (A.isInitialised() && B.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int br = B.getNumRows();
int bc = B.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == br && br == bc && bc == rr && rr == rc) {
A.syncDevice();
B.syncDevice();
cudaParams cp = getCUDAParams(ar, ac);
CUDATimer t;
t.start();
cudaSub KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, B.d_matrix, R.d_matrix, A.getNumRows());
t.stop();
R.syncHost();
return t;
} else {
throw std::runtime_error("Matrix sizes do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDATimer CUDAMatrix::sub(CUDAMatrix& A, std::complex<double> scalar, CUDAMatrix& R) {
if (A.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == rr && rr == rc) {
A.syncDevice();
cudaParams cp = getCUDAParams(ar, ac);
CUDATimer t;
t.start();
cudaSubScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, scalar, R.d_matrix, A.getNumRows());
t.stop();
R.syncHost();
return t;
} else {
throw std::runtime_error("Matrix sizes do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDATimer CUDAMatrix::mul(CUDAMatrix& A, CUDAMatrix& B, CUDAMatrix& R) {
if (A.isInitialised() && B.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int br = B.getNumRows();
int bc = B.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == br && br == bc && bc == rr && rr == rc) {
A.syncDevice();
B.syncDevice();
cudaParams cp = getCUDAParams(ar, ac);
CUDATimer t;
t.start();
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, B.d_matrix, R.d_matrix, A.getNumRows());
t.stop();
R.syncHost();
return t;
} else {
throw std::runtime_error("Matrix sizes do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDATimer CUDAMatrix::mul(CUDAMatrix& A, std::complex<double> scalar, CUDAMatrix& R) {
if (A.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == rr && rr == rc) {
A.syncDevice();
cudaParams cp = getCUDAParams(ar, ac);
CUDATimer t;
t.start();
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, scalar, R.d_matrix, A.getNumRows());
t.stop();
R.syncHost();
return t;
} else {
throw std::runtime_error("Matrix sizes do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDATimer CUDAMatrix::pow(CUDAMatrix& A, int pow, CUDAMatrix& R) {
if (A.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == rr && rr == rc) {
A.syncDevice();
CUDAMatrix T(ar);
T.setIdentity();
T.syncDevice();
cudaParams cp = getCUDAParams(ar, ac);
CUDATimer t;
t.start();
for (int c1 = 0; c1 < pow; c1++) {
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, T.d_matrix, T.d_matrix, ar);
}
t.stop();
T.syncHost();
R.setMatrix(T.getMatrix());
return t;
} else {
throw std::runtime_error("Matrix sizes do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
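// Host-side inverse: a Doolittle-style LU decomposition (unit diagonal on L)
// followed by forward substitution for inv(L) and backward substitution for
// the final result; no device kernels are involved.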
CUDATimer CUDAMatrix::inv(CUDAMatrix& A, CUDAMatrix& R) {
if (A.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == rr && rr == rc) {
CUDATimer t;
CUDAMatrix L = CUDAMatrix(ar, ac);
CUDAMatrix U = CUDAMatrix(ar, ac);
CUDAMatrix Z = CUDAMatrix(ar, ac);
CUDAMatrix I = CUDAMatrix(ar, ac);
I.setIdentity();
t.start();
int n = ar;
int i, j, k;
// LU Decomposition
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
if (j < i) {
U.setCell(i, j, 0);
} else {
U.setCell(i, j, A.getCell(i, j));
for (k = 0; k < i; k++) {
U.setCell(i, j, (U.getCell(i, j) - U.getCell(k, j) * L.getCell(i, k)));
}
}
}
for (j = 0; j < n; j++) {
if (j < i) {
L.setCell(j, i, 0);
} else if (j == i) {
L.setCell(j, i, 1);
} else {
L.setCell(j, i, (A.getCell(j, i) / U.getCell(i, i)));
for (k = 0; k < i; k++) {
L.setCell(j, i, (L.getCell(j, i) - ((U.getCell(k, i) * L.getCell(j, k)) / U.getCell(i, i))));
}
}
}
}
for (i = 0; i < n; i++) {
// Find Z (L^-1) with Forward Substitution
for (j = 0; j < n; j++) {
Z.setCell(j, i, I.getCell(j, i));
for (k = 0; k < n; k++) {
if (k != j) {
Z.setCell(j, i, (Z.getCell(j, i) - (L.getCell(j, k) * Z.getCell(k, i))));
}
}
}
// Find X (A^-1) with Backward Substitution
for (j = n - 1; j >= 0; j--) {
R.setCell(j, i, Z.getCell(j, i));
for (k = 0; k < n; k++) {
if (k != j) {
R.setCell(j, i, (R.getCell(j, i) - (U.getCell(j, k) * R.getCell(k, i))));
}
}
R.setCell(j, i, R.getCell(j, i) / U.getCell(j, j));
}
}
t.stop();
return t;
} else {
throw std::runtime_error("Matrix sizes do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDATimer CUDAMatrix::tra(CUDAMatrix& A, CUDAMatrix& R) {
if (A.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ac == rr) {
A.syncDevice();
int c1, c2;
CUDATimer t;
t.start();
for (c1 = 0; c1 < A.getNumRows(); c1++) {
for (c2 = 0; c2 < A.getNumCols(); c2++) {
R.setCell(c1, c2, A.getCell(c2, c1));
}
}
t.stop();
R.syncDevice();
return t;
} else {
throw std::runtime_error("Transpose matrix is the wrong size");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
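// Matrix exponential via scaling and squaring: diagonal and zero inputs are
// handled directly, otherwise the [m/m] Pade approximant is evaluated with
// the coefficients from getPadeCoefficients and the scaling is undone by
// squaring the result p.scale times.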
CUDATimer CUDAMatrix::exp(CUDAMatrix& A, CUDAMatrix& R) {
if (A.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == rr && rr == rc) {
A.syncDevice();
CUDATimer t;
int c1, c2;
int n = utils::max(ar, ac);
// Special Cases
if (A.isDiagonal()) {
t.start();
for (c1 = 0; c1 < n; c1++) {
R.setCell(c1, c1, std::exp(A.getCell(c1, c1)));
}
t.stop();
R.syncDevice();
} else if (A.isZero()) {
t.start();
			R.setIdentity(); // expm of the zero matrix is the identity
t.stop();
R.syncDevice();
// Normal Case
} else {
// Create Matrices
CUDAMatrix U(ar, ac);
CUDAMatrix V(ar, ac);
CUDAMatrix I(ar, ac); // Identity
CUDAMatrix T(ar, ac); // Tally
CUDAMatrix TMP(ar, ac); // Temporary
I.setIdentity();
I.syncDevice();
// Get CUDA params
cudaParams cp = getCUDAParams(ar, ac);
// Get Pade params
padeParams p = getPadeParams(A);
int s = p.scale;
int m = p.mVal;
std::vector<CUDAMatrix*> pow = p.pow;
// Get Pade coefficients
std::vector<double> c = getPadeCoefficients(m);
// Start timer
t.start();
// Scaling
if (s != 0) {
double multiplier;
multiplier = 1.0 / std::pow(2, s);
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, multiplier, A.d_matrix, n);
for (c1 = 2; c1 <= 6; c1 += 2) {
multiplier = 1.0 / std::pow(2, (s * c1));
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[c1]->d_matrix, multiplier, pow[c1]->d_matrix, n);
}
}
// Approximation
if (m == 3 || m == 5 || m == 7 || m == 9) {
for (c1 = (int) (pow.size()) + 2; c1 < m - 1; c1 += 2) { //for (k = strt:2:m-1)
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[c1 - 2]->d_matrix, pow[2]->d_matrix, pow[c1]->d_matrix, n);
}
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (I.d_matrix, c[1], U.d_matrix, n);
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (I.d_matrix, c[0], V.d_matrix, n);
for (c2 = m; c2 >= 3; c2 -= 2) { //for (j = m : -2 : 3)
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[c2 - 1]->d_matrix, c[c2], TMP.d_matrix, n);
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (U.d_matrix, TMP.d_matrix, U.d_matrix, n);
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[c2 - 1]->d_matrix, c[c2-1], TMP.d_matrix, n);
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (V.d_matrix, TMP.d_matrix, V.d_matrix, n);
}
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (U.d_matrix, A.d_matrix, U.d_matrix, n);
} else if (m == 13) {
				// This is the equivalent of ..
// U = A * (p[6] * (c[13] * p[6] + c[11] * p[4] + c[9] * p[2]) + c[7] * p[6] + c[5] * p[4] + c[3] * p[2] + c[1] * I); RUN IN STREAM 1
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[6]->d_matrix, c[13], T.d_matrix, n); // p[6] * c[13] -> T Needs new TMP var
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[4]->d_matrix, c[11], TMP.d_matrix, n); // p[4] * c[11] -> TMP (Cannot be used in multiple streams)
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[2]->d_matrix, c[9], TMP.d_matrix, n); // p[2] * c[9] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[6]->d_matrix, T.d_matrix, T.d_matrix, n); // p[6] * T -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[6]->d_matrix, c[7], TMP.d_matrix, n); // p[6] * c[7] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[4]->d_matrix, c[5], TMP.d_matrix, n); // p[4] * c[5] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[2]->d_matrix, c[3], TMP.d_matrix, n); // p[2] * c[3] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (I.d_matrix, c[1], TMP.d_matrix, n); // I * c[1] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, T.d_matrix, U.d_matrix, n); // A * T -> U
				// This is the equivalent of ..
//V = p[6] * (c[12] * p[6] + c[10] * p[4] + c[8] * p[2]) + c[6] * p[6] + c[4] * p[4] + c[2] * p[2] + c[0] * I; RUN IN STREAM 2
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[6]->d_matrix, c[12], T.d_matrix, n); // p[6] * c[12] -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[4]->d_matrix, c[10], TMP.d_matrix, n); // p[4] * c[10] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[2]->d_matrix, c[8], TMP.d_matrix, n); // p[2] * c[8] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[6]->d_matrix, T.d_matrix, T.d_matrix, n); // p[6] -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[6]->d_matrix, c[6], TMP.d_matrix, n); // p[6] * c[6] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[4]->d_matrix, c[4], TMP.d_matrix, n); // p[4] * c[4] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (pow[2]->d_matrix, c[2], TMP.d_matrix, n); // p[2] * c[2] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n); // T + TMP -> T
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (I.d_matrix, c[0], TMP.d_matrix, n); // I * c[0] -> TMP
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, V.d_matrix, n); // T + TMP -> V
}
			// This is the equivalent of ..
			// R = inv(V - U) * (2 * U) + I, which equals inv(V - U) * (V + U)
cudaSub KERNEL_ARGS2(cp.bpg, cp.tpb) (V.d_matrix, U.d_matrix, T.d_matrix, n);
cudaMulScalar KERNEL_ARGS2(cp.bpg, cp.tpb) (U.d_matrix, 2, TMP.d_matrix, n);
//cudaInv KERNEL_ARGS2(cp.bpg, cp.tpb) (TMP.d_matrix, TMP.d_matrix, n); // TEMP CODE BELOW
T.syncHost();
CUDAMatrix::inv(T, T);
T.syncDevice();
//
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, TMP.d_matrix, T.d_matrix, n);
cudaAdd KERNEL_ARGS2(cp.bpg, cp.tpb) (T.d_matrix, I.d_matrix, R.d_matrix, n);
// Squaring
for (int k = 0; k < s; k++) {
cudaMul KERNEL_ARGS2(cp.bpg, cp.tpb) (R.d_matrix, R.d_matrix, R.d_matrix, n);
}
cudaThreadSynchronize();
t.stop();
R.syncHost();
}
return t;
} else {
throw std::runtime_error("Matrix sizez do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
CUDATimer CUDAMatrix::abs(CUDAMatrix& A, CUDAMatrix& R) {
if (A.isInitialised() && R.isInitialised()) {
int ar = A.getNumRows();
int ac = A.getNumCols();
int rr = R.getNumRows();
int rc = R.getNumCols();
if (ar == ac && ac == rr && rr == rc) {
A.syncDevice();
cudaParams cp = getCUDAParams(ar, ac);
CUDATimer t;
t.start();
cudaAbs KERNEL_ARGS2(cp.bpg, cp.tpb) (A.d_matrix, R.d_matrix, ar);
t.stop();
R.syncHost();
return t;
} else {
throw std::runtime_error("Matrix sizes do not match");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
// BOOLEANS
bool CUDAMatrix::isInitialised() {
return initialised;
}
bool CUDAMatrix::isSquare() {
if (initialised) {
if (numCols == numRows) {
return true;
} else {
return false;
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
bool CUDAMatrix::isDiagonal() {
if (initialised) {
if (!isSquare()) {
return false;
}
for (int c1 = 0; c1 < numRows; c1++) {
for (int c2 = 0; c2 < numCols; c2++) {
if (c1 != c2 && getCell(c1, c2) != 0.0) {
return false;
}
}
}
return true;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
bool CUDAMatrix::isIdentity() {
if (initialised) {
for (int c1 = 0; c1 < numRows; c1++) {
for (int c2 = 0; c2 < numCols; c2++) {
if ((c1 != c2 && getCell(c1, c2) != 0.0) || (c1 == c2 && getCell(c1, c2) != 1.0)) {
return false;
}
}
}
return true;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
bool CUDAMatrix::isZero() {
if (initialised) {
for (int c1 = 0; c1 < numRows; c1++) {
for (int c2 = 0; c2 < numCols; c2++) {
if (getCell(c1, c2) != 0.0) {
return false;
}
}
}
return true;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
bool CUDAMatrix::isSmall() {
return utils::max(numRows, numCols) < 150;
}
bool CUDAMatrix::isComplex() {
std::complex<double> cell;
for (int c1 = 0; c1 < numEls; c1++) {
cell = getCell(c1);
if (cell.imag() != 0.0) {
return true;
}
}
return false;
}
// SETTERS
void CUDAMatrix::setCell(int row, int col, std::complex<double> val) {
if (isInitialised()) {
h_matrix[numCols * row + col] = val;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
void CUDAMatrix::setCell(int i, std::complex<double> val) {
if (isInitialised()) {
h_matrix[i] = val;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
void CUDAMatrix::setMatrix(std::complex<double> val) {
if (isInitialised()) {
for (int c1 = 0; c1 < getNumEls(); c1++) {
h_matrix[c1] = val;
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
void CUDAMatrix::setMatrix(std::complex<double>* inMatrix) {
if (isInitialised()) {
for (int c1 = 0; c1 < numEls; c1++) {
h_matrix[c1] = inMatrix[c1];
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
void CUDAMatrix::setMatrix(std::initializer_list<std::complex<double>> inMatrix) {
if (isInitialised()) {
if (inMatrix.size() == getNumEls()) {
std::copy(inMatrix.begin(), inMatrix.end(), h_matrix);
} else {
throw std::runtime_error("Initialiser-list size does not match matrix size");
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
void CUDAMatrix::setIdentity() {
if (isInitialised()) {
int row, col;
for (int c1 = 0; c1 < getNumEls(); c1++) {
row = getCurRow(c1);
col = getCurCol(c1);
if (row == col) {
h_matrix[c1] = 1;
} else {
h_matrix[c1] = 0;
}
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
void CUDAMatrix::setRandomDouble(double min, double max) {
if (isInitialised()) {
double r;
std::default_random_engine rng((unsigned int) (time(0)));
std::uniform_real_distribution<double> gen(min, max);
for (int c1 = 0; c1 < numEls; c1++) {
r = gen(rng);
setCell(c1, r);
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
void CUDAMatrix::setRandomInt(int min, int max) {
if (isInitialised()) {
int r;
std::default_random_engine rng((unsigned int) (time(0)));
std::uniform_int_distribution<int> gen(min, max);
for (int c1 = 0; c1 < numEls; c1++) {
r = gen(rng);
setCell(c1, r);
}
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
// GETTERS
double CUDAMatrix::getNorm(int n) {
int c1, c2;
double sum, max = 0;
if (n == 1) {
// 1 Norm
for (c1 = 0; c1 < numCols; c1++) {
sum = 0;
for (c2 = 0; c2 < numRows; c2++) {
sum += std::abs(getCell(c2, c1));
}
if (std::norm(sum) > std::norm(max)) {
max = sum;
}
}
return max;
} else if (n == INFINITY) {
// Inf Norm
for (c1 = 0; c1 < numRows; c1++) {
sum = 0;
for (c2 = 0; c2 < numCols; c2++) {
				sum += std::abs(getCell(c1, c2));
}
if (std::norm(sum) > std::norm(max)) {
max = sum;
}
}
return max;
} else {
//// Euclidian Not called from anywhere. Requires SVD implementation to work.
//sum = 0;
//for (c1 = 0; c1 < numEls; c1++) {
// sum += std::pow(getCell(c1), n);
//}
//return std::pow(sum, 1.0 / n);
return -1;
}
}
int CUDAMatrix::getCurRow(int i) {
if (isInitialised()) {
return (int) (floor(i / numCols));
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
int CUDAMatrix::getCurCol(int i) {
if (isInitialised()) {
return (int) (i - (numCols*getCurRow(i)));
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
std::complex<double> CUDAMatrix::getCell(int row, int col) {
if (isInitialised()) {
return h_matrix[row*numCols + col];
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
std::complex<double> CUDAMatrix::getCell(int i) {
if (isInitialised()) {
return h_matrix[i];
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
std::complex<double>* CUDAMatrix::getMatrix() {
if (isInitialised()) {
return h_matrix;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
int CUDAMatrix::getNumRows() {
if (isInitialised()) {
return numRows;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
int CUDAMatrix::getNumCols() {
if (isInitialised()) {
return numCols;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
int CUDAMatrix::getNumEls() {
if (isInitialised()) {
return numEls;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
size_t CUDAMatrix::getSize() {
if (isInitialised()) {
return size;
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
}
// UTILS
int utils::getNumDigits(double x) {
if (x > 1.0 || x < -1.0) {
return (int) (floor(log10(abs(x))) + 1);
}
return 1;
}
int utils::max(int x, int y) {
if (x > y) {
return x;
} else {
return y;
}
}
double utils::max(double x, double y) {
if (x > y) {
return x;
} else {
return y;
}
}
int utils::min(int x, int y) {
if (x < y) {
return x;
} else {
return y;
}
}
double utils::min(double x, double y) {
if (x < y) {
return x;
} else {
return y;
}
}
// OPERATOR OVERRIDES
std::ostream& operator<<(std::ostream& oStream, CUDAMatrix& A) {
if (A.isInitialised()) {
// Init
std::complex<double> cell;
bool isComplex = A.isComplex();
bool scientific = false;
int c1, c2, r, i;
int realLength = 0, imagLength = 0, exp = 0;
double divider;
int precision = 0;
int maxFixedDigits = 4;
// Get info
for (c1 = 0; c1 < A.getNumEls(); c1++) {
cell = A.getCell(c1);
// Check if it's decimal
if ((cell.real() - (int) (cell.real())) != 0.0 ||
(cell.imag() - (int) (cell.imag())) != 0.0) {
precision = 4;
}
// Get maximum exponent
r = utils::getNumDigits(cell.real());
i = utils::getNumDigits(cell.imag());
if (r - 1 > exp) {
exp = r - 1;
realLength = r;
}
if (i - 1 > exp) {
exp = i - 1;
imagLength = i;
				if (std::abs(cell.imag()) == 1.0) {
imagLength++;
}
}
}
// Check if the output should be in fixed or scientific form
if (exp >= maxFixedDigits) {
scientific = true;
}
// Get divider for scientific form
divider = std::pow(10, exp);
// Output name and multiplier
oStream << " = ";
if (scientific) {
oStream << "(10 ^ " << exp << ") *";
}
// Output cell
oStream << std::endl << std::setprecision(precision) << std::fixed;
for (c1 = 0; c1 < A.getNumEls(); c1++) {
cell = A.getCell(c1);
oStream << "| ";
// Spacing and formatting for scientific/fixed
if (scientific) {
cell /= divider;
} else {
r = utils::getNumDigits(cell.real());
for (c2 = 0; c2 < (realLength - r); c2++) {
oStream << " ";
}
}
// Output real
oStream << cell.real() << " ";
// Output complex
if (isComplex) {
if (cell.imag() != 0.0) {
if (cell.imag() > 0.0) {
oStream << "+ ";
} else {
oStream << "- ";
}
if (abs(cell.imag()) != 1.0) {
oStream << std::abs(cell.imag());
} else {
oStream << " ";
}
oStream << "i ";
} else {
i = utils::getNumDigits(cell.imag());
for (c2 = 0; c2 < imagLength + 3; c2++) {
oStream << " ";
}
}
}
// Output new line if row end reached
if (A.getCurRow(c1 + 1) > A.getCurRow(c1)) {
oStream << "|";
if (A.getCurRow(c1 + 1) < A.getNumRows()) {
oStream << std::endl;
}
}
}
oStream << std::endl;
return oStream;
// // Get precision
// cell = A.getCell(c1);
// if ((cell - (int) (cell)) != 0.0) {
// precision = 5;
// }
// // Get maximum number length
// length = utils::getNumDigits(cell);
// if (length > maxLength) {
// maxLength = length;
// }
//}
//for (c1 = 0; c1 < A.getNumEls(); c1++) {
// cell = A.getCell(c1);
// // Remove negative zeros
// if (cell == 0.0) {
// cell = 0;
// }
// oStream << "| ";
// // Add whitespace if shorter than maxLength
// length = utils::getNumDigits(cell);
// for (c2 = 0; c2 < (maxLength - length); c2++) {
// oStream << " ";
// }
// // Output number
// oStream << std::setprecision(precision) << std::fixed << cell << " ";
// // Output new line if row end reached
// if (A.getCurRow(c1 + 1) > A.getCurRow(c1)) {
// oStream << "|";
// if (A.getCurRow(c1 + 1) < A.getNumRows()) {
// oStream << std::endl;
// }
// }
} else {
throw std::runtime_error("Cannot perform matrix operations before initialisation");
}
} |
7786b2ce2f86f34a05a295eb6cde22dce9460da8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
#include <src/sceneStructs.h>
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
#define blockSize 32
#define MAX_BLOCK_SIZE 16
#define checkCUDAErrorWithLine(msg) ((void)0)
//checkCUDAError(msg, __LINE__)
#define USE_CUDA_DEV_SYNC 0
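// Up-sweep (reduce) phase of the work-efficient Blelloch scan: on each pass,
// tree nodes spaced `offset` apart add in the partial sum stored halfOffset
// elements to their left.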
__global__ void upSweepIteration(int n, int *odata, const int offset, const int halfOffset) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int nodeIdx = (idx + 1) * offset - 1;
if (nodeIdx < n) {
odata[nodeIdx] = odata[nodeIdx] + odata[nodeIdx - halfOffset];
}
}
__global__ void setRootToZero(int n, int *odata) {
odata[n - 1] = 0;
}
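// Down-sweep phase: each node hands its old value down to its left child and
// keeps the sum of both, turning the reduction tree into an exclusive scan.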
__global__ void downSweepIteration(int n, int *odata, const int offset, const int halfOffset) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int nodeIdx = (idx + 1) * offset - 1;
if (nodeIdx < n) {
int originalNodeValue = odata[nodeIdx];
odata[nodeIdx] = odata[nodeIdx] + odata[nodeIdx - halfOffset];
odata[nodeIdx - halfOffset] = originalNodeValue;
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
* internalUse specifies whether this is used as a helper function,
* for example, in compact. If so, it assumes idata and odata are in
* device memory and does not use gpuTimer.
*/
void scan(int n, int *odata, const int *idata, bool internalUse) {
if (n == 1) {
odata[0] = 0;
return;
}
// TODO: handle n <= 2 ???
// nearest power of two
const int bufSize = 1 << ilog2ceil(n);
int *dev_buf;
if (internalUse) {
dev_buf = odata;
}
else {
hipMalloc((void**)&dev_buf, bufSize * sizeof(int));
checkCUDAErrorWithLine("malloc dev_buf error!!!");
if (n != bufSize) {
hipMemset(dev_buf + n, 0, (bufSize - n) * sizeof(int));
checkCUDAErrorWithLine("memset dev_buf to 0 error!!!");
}
hipMemcpy(dev_buf, idata, n * sizeof(int), hipMemcpyHostToDevice);
checkCUDAErrorWithLine("memcpy dev_buf error!!!");
}
int halfOffset = 1;
int numThreads = bufSize / 2;
dim3 numBlocks(1);
int threadsPerBlock;
hipDeviceSynchronize();
checkCUDAErrorWithLine("cuda sync error!!!");
if (!internalUse) {
timer().startGpuTimer();
}
// skip offset = n because we overwrite root's value anyway
for (int offset = 2; offset < bufSize; offset *= 2) {
if (numThreads > MAX_BLOCK_SIZE) {
numBlocks.x = numThreads / MAX_BLOCK_SIZE;
//numBlocks = dim3(numThreads / MAX_BLOCK_SIZE);
threadsPerBlock = MAX_BLOCK_SIZE;
}
else {
numBlocks.x = 1;
//numBlocks = dim3(1);
threadsPerBlock = numThreads;
}
hipLaunchKernelGGL(( upSweepIteration), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, bufSize, dev_buf, offset, halfOffset);
checkCUDAErrorWithLine("upSweep error!!!");
halfOffset = offset;
numThreads /= 2;
}
setRootToZero << <dim3(1), 1 >> > (bufSize, dev_buf);
int offset = bufSize;
numThreads = 1;
for (int halfOffset = bufSize / 2; halfOffset >= 1; halfOffset /= 2) {
if (numThreads > MAX_BLOCK_SIZE) {
numBlocks.x = numThreads / MAX_BLOCK_SIZE;
//numBlocks = dim3(numThreads / MAX_BLOCK_SIZE);
threadsPerBlock = MAX_BLOCK_SIZE;
}
else {
numBlocks.x = 1;
//numBlocks = dim3(1);
threadsPerBlock = numThreads;
}
downSweepIteration << <numBlocks, threadsPerBlock >> >(bufSize, dev_buf, offset, halfOffset);
checkCUDAErrorWithLine("downSweep error!!!");
offset = halfOffset;
numThreads *= 2;
}
if (!internalUse) {
timer().endGpuTimer();
hipMemcpy(odata, dev_buf, n * sizeof(int), hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy dev_buf to host error!!!");
hipFree(dev_buf);
checkCUDAErrorWithLine("free dev_buf error!!!");
}
}
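// Stream compaction pipeline: map builds a 0/1 predicate array, the exclusive
// scan converts it into output indices, and scatter copies surviving elements
// to those indices.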
__global__ void map(int n, int *odata, const int *idata) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x; // TODO?
if (idx < n) {
odata[idx] = (idata[idx] != 0) ? 1 : 0;
}
}
__global__ void scatter(int n, int *odata, const int *postMapData, const int *postScanData, const int *originalData) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x; // TODO?
if (idx < n && postMapData[idx]) {
odata[postScanData[idx]] = originalData[idx];
}
}
__global__ void getCompactedSize(int n, int *odata, const int *postMapData, const int *postScanData) {
*odata = postScanData[n - 1] + (postMapData[n - 1] ? 1 : 0);
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
int *dev_originalData;
int *dev_postMapBuf;
int *dev_postScanBuf;
int *dev_postScatterBuf;
int *dev_scatteredSize;
hipMalloc((void**)&dev_originalData, n * sizeof(int));
checkCUDAErrorWithLine("malloc dev_originalData error!!!");
hipMalloc((void**)&dev_postMapBuf, n * sizeof(int));
checkCUDAErrorWithLine("malloc dev_postMapBuf error!!!");
// needs to be power of 2 for scan to work
const int postScanBufSize = 1 << ilog2ceil(n);
hipMalloc((void**)&dev_postScanBuf, postScanBufSize * sizeof(int));
checkCUDAErrorWithLine("malloc dev_postScanBuf error!!!");
if (postScanBufSize != n) {
hipMemset(dev_postScanBuf, 0, postScanBufSize * sizeof(int));
checkCUDAErrorWithLine("memset dev_postScanBuf to 0 error!!!");
}
hipMalloc((void**)&dev_postScatterBuf, n * sizeof(int));
checkCUDAErrorWithLine("malloc dev_postScatterBuf error!!!");
hipMalloc((void**)&dev_scatteredSize, sizeof(int));
checkCUDAErrorWithLine("malloc dev_scatteredSize error!!!");
hipMemcpy(dev_originalData, idata, n * sizeof(int), hipMemcpyHostToDevice);
checkCUDAErrorWithLine("memcpy dev_originalData from host error!!!");
dim3 numBlocks((n + blockSize - 1) / blockSize);
timer().startGpuTimer();
hipLaunchKernelGGL(( map), dim3(numBlocks), dim3(blockSize), 0, 0, n, dev_postMapBuf, dev_originalData);
checkCUDAErrorWithLine("map error!!!");
hipMemcpy(dev_postScanBuf, dev_postMapBuf, n * sizeof(int), hipMemcpyDeviceToDevice);
checkCUDAErrorWithLine("memcpy map to scan error!!!");
scan(n, dev_postScanBuf, dev_postMapBuf, true);
checkCUDAErrorWithLine("scan error!!!");
scatter << <numBlocks, blockSize >> > (n, dev_postScatterBuf, dev_postMapBuf, dev_postScanBuf, dev_originalData);
checkCUDAErrorWithLine("scatter error!!!");
hipLaunchKernelGGL(( getCompactedSize), dim3(dim3(1)), dim3(1), 0, 0, n, dev_scatteredSize, dev_postMapBuf, dev_postScanBuf);
checkCUDAErrorWithLine("get size error!!!");
timer().endGpuTimer();
int scatteredSize;
hipMemcpy(&scatteredSize, dev_scatteredSize, sizeof(int), hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy dev_scatteredSize to host error!!!");
hipMemcpy(odata, dev_postScatterBuf, scatteredSize * sizeof(int), hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy dev_postScatterBuf to host error!!!");
hipFree(dev_originalData);
checkCUDAErrorWithLine("free dev_originalData error!!!");
hipFree(dev_postMapBuf);
checkCUDAErrorWithLine("free dev_postMapBuf error!!!");
hipFree(dev_postScanBuf);
checkCUDAErrorWithLine("free dev_postScanBuf error!!!");
hipFree(dev_postScatterBuf);
checkCUDAErrorWithLine("free dev_postScatterBuf error!!!");
hipFree(dev_scatteredSize);
checkCUDAErrorWithLine("free dev_scatteredSize error!!!");
return scatteredSize;
}
}
}
| 7786b2ce2f86f34a05a295eb6cde22dce9460da8.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
#include <src/sceneStructs.h>
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
#define blockSize 32
#define MAX_BLOCK_SIZE 16
#define checkCUDAErrorWithLine(msg) ((void)0)
//checkCUDAError(msg, __LINE__)
#define USE_CUDA_DEV_SYNC 0
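// Up-sweep (reduce) phase of the work-efficient Blelloch scan: on each pass,
// tree nodes spaced `offset` apart add in the partial sum stored halfOffset
// elements to their left.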
__global__ void upSweepIteration(int n, int *odata, const int offset, const int halfOffset) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int nodeIdx = (idx + 1) * offset - 1;
if (nodeIdx < n) {
odata[nodeIdx] = odata[nodeIdx] + odata[nodeIdx - halfOffset];
}
}
__global__ void setRootToZero(int n, int *odata) {
odata[n - 1] = 0;
}
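// Down-sweep phase: each node hands its old value down to its left child and
// keeps the sum of both, turning the reduction tree into an exclusive scan.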
__global__ void downSweepIteration(int n, int *odata, const int offset, const int halfOffset) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int nodeIdx = (idx + 1) * offset - 1;
if (nodeIdx < n) {
int originalNodeValue = odata[nodeIdx];
odata[nodeIdx] = odata[nodeIdx] + odata[nodeIdx - halfOffset];
odata[nodeIdx - halfOffset] = originalNodeValue;
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
* internalUse specifies whether this is used as a helper function,
* for example, in compact. If so, it assumes idata and odata are in
* device memory and does not use gpuTimer.
*/
void scan(int n, int *odata, const int *idata, bool internalUse) {
if (n == 1) {
odata[0] = 0;
return;
}
// TODO: handle n <= 2 ???
// nearest power of two
const int bufSize = 1 << ilog2ceil(n);
int *dev_buf;
if (internalUse) {
dev_buf = odata;
}
else {
cudaMalloc((void**)&dev_buf, bufSize * sizeof(int));
checkCUDAErrorWithLine("malloc dev_buf error!!!");
if (n != bufSize) {
cudaMemset(dev_buf + n, 0, (bufSize - n) * sizeof(int));
checkCUDAErrorWithLine("memset dev_buf to 0 error!!!");
}
cudaMemcpy(dev_buf, idata, n * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAErrorWithLine("memcpy dev_buf error!!!");
}
int halfOffset = 1;
int numThreads = bufSize / 2;
dim3 numBlocks(1);
int threadsPerBlock;
cudaDeviceSynchronize();
checkCUDAErrorWithLine("cuda sync error!!!");
if (!internalUse) {
timer().startGpuTimer();
}
// skip offset = n because we overwrite root's value anyway
for (int offset = 2; offset < bufSize; offset *= 2) {
if (numThreads > MAX_BLOCK_SIZE) {
numBlocks.x = numThreads / MAX_BLOCK_SIZE;
//numBlocks = dim3(numThreads / MAX_BLOCK_SIZE);
threadsPerBlock = MAX_BLOCK_SIZE;
}
else {
numBlocks.x = 1;
//numBlocks = dim3(1);
threadsPerBlock = numThreads;
}
upSweepIteration<<<numBlocks, threadsPerBlock>>>(bufSize, dev_buf, offset, halfOffset);
checkCUDAErrorWithLine("upSweep error!!!");
halfOffset = offset;
numThreads /= 2;
}
setRootToZero << <dim3(1), 1 >> > (bufSize, dev_buf);
int offset = bufSize;
numThreads = 1;
for (int halfOffset = bufSize / 2; halfOffset >= 1; halfOffset /= 2) {
if (numThreads > MAX_BLOCK_SIZE) {
numBlocks.x = numThreads / MAX_BLOCK_SIZE;
//numBlocks = dim3(numThreads / MAX_BLOCK_SIZE);
threadsPerBlock = MAX_BLOCK_SIZE;
}
else {
numBlocks.x = 1;
//numBlocks = dim3(1);
threadsPerBlock = numThreads;
}
downSweepIteration << <numBlocks, threadsPerBlock >> >(bufSize, dev_buf, offset, halfOffset);
checkCUDAErrorWithLine("downSweep error!!!");
offset = halfOffset;
numThreads *= 2;
}
if (!internalUse) {
timer().endGpuTimer();
cudaMemcpy(odata, dev_buf, n * sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy dev_buf to host error!!!");
cudaFree(dev_buf);
checkCUDAErrorWithLine("free dev_buf error!!!");
}
}
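// Stream compaction pipeline: map builds a 0/1 predicate array, the exclusive
// scan converts it into output indices, and scatter copies surviving elements
// to those indices.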
__global__ void map(int n, int *odata, const int *idata) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x; // TODO?
if (idx < n) {
odata[idx] = (idata[idx] != 0) ? 1 : 0;
}
}
__global__ void scatter(int n, int *odata, const int *postMapData, const int *postScanData, const int *originalData) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x; // TODO?
if (idx < n && postMapData[idx]) {
odata[postScanData[idx]] = originalData[idx];
}
}
__global__ void getCompactedSize(int n, int *odata, const int *postMapData, const int *postScanData) {
*odata = postScanData[n - 1] + (postMapData[n - 1] ? 1 : 0);
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
int *dev_originalData;
int *dev_postMapBuf;
int *dev_postScanBuf;
int *dev_postScatterBuf;
int *dev_scatteredSize;
cudaMalloc((void**)&dev_originalData, n * sizeof(int));
checkCUDAErrorWithLine("malloc dev_originalData error!!!");
cudaMalloc((void**)&dev_postMapBuf, n * sizeof(int));
checkCUDAErrorWithLine("malloc dev_postMapBuf error!!!");
// needs to be power of 2 for scan to work
const int postScanBufSize = 1 << ilog2ceil(n);
cudaMalloc((void**)&dev_postScanBuf, postScanBufSize * sizeof(int));
checkCUDAErrorWithLine("malloc dev_postScanBuf error!!!");
if (postScanBufSize != n) {
cudaMemset(dev_postScanBuf, 0, postScanBufSize * sizeof(int));
checkCUDAErrorWithLine("memset dev_postScanBuf to 0 error!!!");
}
cudaMalloc((void**)&dev_postScatterBuf, n * sizeof(int));
checkCUDAErrorWithLine("malloc dev_postScatterBuf error!!!");
cudaMalloc((void**)&dev_scatteredSize, sizeof(int));
checkCUDAErrorWithLine("malloc dev_scatteredSize error!!!");
cudaMemcpy(dev_originalData, idata, n * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAErrorWithLine("memcpy dev_originalData from host error!!!");
dim3 numBlocks((n + blockSize - 1) / blockSize);
timer().startGpuTimer();
map<<<numBlocks, blockSize>>>(n, dev_postMapBuf, dev_originalData);
checkCUDAErrorWithLine("map error!!!");
cudaMemcpy(dev_postScanBuf, dev_postMapBuf, n * sizeof(int), cudaMemcpyDeviceToDevice);
checkCUDAErrorWithLine("memcpy map to scan error!!!");
scan(n, dev_postScanBuf, dev_postMapBuf, true);
checkCUDAErrorWithLine("scan error!!!");
scatter << <numBlocks, blockSize >> > (n, dev_postScatterBuf, dev_postMapBuf, dev_postScanBuf, dev_originalData);
checkCUDAErrorWithLine("scatter error!!!");
getCompactedSize<<<dim3(1), 1>>>(n, dev_scatteredSize, dev_postMapBuf, dev_postScanBuf);
checkCUDAErrorWithLine("get size error!!!");
timer().endGpuTimer();
int scatteredSize;
cudaMemcpy(&scatteredSize, dev_scatteredSize, sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy dev_scatteredSize to host error!!!");
cudaMemcpy(odata, dev_postScatterBuf, scatteredSize * sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy dev_postScatterBuf to host error!!!");
cudaFree(dev_originalData);
checkCUDAErrorWithLine("free dev_originalData error!!!");
cudaFree(dev_postMapBuf);
checkCUDAErrorWithLine("free dev_postMapBuf error!!!");
cudaFree(dev_postScanBuf);
checkCUDAErrorWithLine("free dev_postScanBuf error!!!");
cudaFree(dev_postScatterBuf);
checkCUDAErrorWithLine("free dev_postScatterBuf error!!!");
cudaFree(dev_scatteredSize);
checkCUDAErrorWithLine("free dev_scatteredSize error!!!");
return scatteredSize;
}
}
}
|
e8f563c4f6ee5098b280960bcdda011091787c95.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
#include "../include/ContAcq-IntClk.h"
// includes CUDA
#include <hip/hip_runtime.h>
#define MAX_THREADS_PER_BLOCK 256
#define THREADS_PER_BLOCK 256
#define LINE_SIZE 128
#define SETS 4
#define ASSOC 24
#define SIMD_WIDTH 32
// Variables
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
#define ITERATIONS 30
texture<float,1,hipReadModeElementType> texmem1;
texture<float,1,hipReadModeElementType> texmem2;
texture<float,1,hipReadModeElementType> texmem3;
texture<float,1,hipReadModeElementType> texmem4;
texture<float,1,hipReadModeElementType> texmem5;
texture<float,1,hipReadModeElementType> texmem6;
texture<float,1,hipReadModeElementType> texmem7;
texture<float,1,hipReadModeElementType> texmem9;
texture<float,1,hipReadModeElementType> texmem8;
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
__constant__ float ConstArray3[THREADS_PER_BLOCK];
__constant__ float ConstArray4[THREADS_PER_BLOCK];
__constant__ float ConstArray5[THREADS_PER_BLOCK];
__constant__ float ConstArray6[THREADS_PER_BLOCK];
__constant__ float ConstArray7[THREADS_PER_BLOCK];
__constant__ float ConstArray8[THREADS_PER_BLOCK];
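// Benchmark kernel: each thread fills a few shared-memory arrays, then for
// ITERATIONS rounds mixes reads from texture memory, constant memory and
// shared memory, writing the results to strided offsets in `out`.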
__global__ void tex_bm_kernel( float* out, unsigned size)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
__shared__ float I1[THREADS_PER_BLOCK];
__shared__ float I2[THREADS_PER_BLOCK];
__shared__ float I3[THREADS_PER_BLOCK];
__shared__ float I4[THREADS_PER_BLOCK];
__shared__ float I5[THREADS_PER_BLOCK];
__shared__ float I6[THREADS_PER_BLOCK];
__shared__ float I7[THREADS_PER_BLOCK];
__shared__ float I8[THREADS_PER_BLOCK];
I1[tid%THREADS_PER_BLOCK] = tid;
I2[tid%THREADS_PER_BLOCK] = tid/2;
I3[tid%THREADS_PER_BLOCK] = 2*tid;
I4[tid%THREADS_PER_BLOCK] = tid+2;
I5[tid%THREADS_PER_BLOCK] = 5*tid;
I6[tid%THREADS_PER_BLOCK] = tid/2;
I7[tid%THREADS_PER_BLOCK] = tid*10;
I8[tid%THREADS_PER_BLOCK] = tid/2;
if(tid < size){
for(unsigned i=0; i<ITERATIONS; ++i){
out[tid] = tex1Dfetch(texmem1,tid);
out[tid*2] = ConstArray1[(tid+i)%THREADS_PER_BLOCK];
out[tid*3] = I1[(tid+i)%THREADS_PER_BLOCK];
out[tid*4] = tex1Dfetch(texmem4,tid);
out[tid*5] = ConstArray2[(tid+i)%THREADS_PER_BLOCK];
out[tid*6] = I3[tid%THREADS_PER_BLOCK];
out[tid*7] = tex1Dfetch(texmem7,tid);
out[tid*8] = ConstArray3[(tid+i)%THREADS_PER_BLOCK];
out[tid*9] =tex1Dfetch(texmem9,tid);
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
// Seed the RNG once up front: calling srand(time(0)) inside the loops resets the
// sequence every iteration, and rand() / RAND_MAX is integer division (almost
// always 0), so the quotient is taken in floating point instead.
srand(time(0));
float array1[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array1[i] = (float)rand() / RAND_MAX;
}
float array2[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array2[i] = (float)rand() / RAND_MAX;
}
float array3[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array3[i] = (float)rand() / RAND_MAX;
}
float array4[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array4[i] = (float)rand() / RAND_MAX;
}
float array5[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array5[i] = (float)rand() / RAND_MAX;
}
float array6[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array6[i] = (float)rand() / RAND_MAX;
}
float array7[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array7[i] = (float)rand() / RAND_MAX;
}
float array8[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array8[i] = (float)rand() / RAND_MAX;
}
// Pass the symbols themselves; hipMemcpyToSymbol does not accept string names.
hipMemcpyToSymbol(HIP_SYMBOL(ConstArray1), array1, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol(HIP_SYMBOL(ConstArray2), array2, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol(HIP_SYMBOL(ConstArray3), array3, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol(HIP_SYMBOL(ConstArray4), array4, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol(HIP_SYMBOL(ConstArray5), array5, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol(HIP_SYMBOL(ConstArray6), array6, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol(HIP_SYMBOL(ConstArray7), array7, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol(HIP_SYMBOL(ConstArray8), array8, sizeof(float) * THREADS_PER_BLOCK );
int texmem_size = LINE_SIZE*SETS*ASSOC;
float *host_texture1 = (float*) malloc(texmem_size*sizeof(float));
for (int i=0; i< texmem_size; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
float *device_texture5;
float *device_texture6;
float *device_texture7;
float *device_texture8;
float *device_texture9;
float *host_out = (float*) malloc(texmem_size*sizeof(float)*10);
float *device_out;
// Allocation sizes are in bytes; texmem_size counts floats.
hipMalloc((void**) &device_texture1, texmem_size*sizeof(float));
hipMalloc((void**) &device_texture2, texmem_size*sizeof(float));
hipMalloc((void**) &device_texture3, texmem_size*sizeof(float));
hipMalloc((void**) &device_texture4, texmem_size*sizeof(float));
hipMalloc((void**) &device_texture5, texmem_size*sizeof(float));
hipMalloc((void**) &device_texture6, texmem_size*sizeof(float));
hipMalloc((void**) &device_texture7, texmem_size*sizeof(float));
hipMalloc((void**) &device_texture8, texmem_size*sizeof(float));
hipMalloc((void**) &device_texture9, texmem_size*sizeof(float));
hipMalloc((void**) &device_out, texmem_size*10*sizeof(float));
hipMemcpy(device_texture1, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture2, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture3, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture4, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture5, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture6, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture7, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture8, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture9, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
// Bind the full buffers; the size argument is in bytes.
hipBindTexture(0, texmem1, device_texture1, texmem_size*sizeof(float));
hipBindTexture(0, texmem2, device_texture2, texmem_size*sizeof(float));
hipBindTexture(0, texmem3, device_texture3, texmem_size*sizeof(float));
hipBindTexture(0, texmem4, device_texture4, texmem_size*sizeof(float));
hipBindTexture(0, texmem5, device_texture5, texmem_size*sizeof(float));
hipBindTexture(0, texmem6, device_texture6, texmem_size*sizeof(float));
hipBindTexture(0, texmem7, device_texture7, texmem_size*sizeof(float));
hipBindTexture(0, texmem8, device_texture8, texmem_size*sizeof(float));
hipBindTexture(0, texmem9, device_texture9, texmem_size*sizeof(float));
unsigned num_blocks = (texmem_size / MAX_THREADS_PER_BLOCK) + 1;
dim3 grid( num_blocks, 1, 1);
dim3 threads( MAX_THREADS_PER_BLOCK, 1, 1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
hipLaunchKernelGGL(( tex_bm_kernel), dim3(grid), dim3(threads), 0 , 0, device_out, texmem_size);
hipDeviceSynchronize();
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
printf("Kernel DONE, probably correctly\n");
hipMemcpy(host_out, device_out, texmem_size*sizeof(float), hipMemcpyDeviceToHost);
/*
printf("Output: ");
float error = false;
for (int i=0; i< texmem_size; i++){
printf("%.1f ", host_out[i]);
if (host_out[i] - i > 0.0001) error = true;
}
printf("\n");
if (error) printf("\nFAILED\n");
else printf("\nPASSED\n");
*/
}
void CleanupResources(void){
// Free device memory
}
// Allocates an array with random float entries.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
| e8f563c4f6ee5098b280960bcdda011091787c95.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include <string.h>
#include <cuda.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
#include "../include/ContAcq-IntClk.h"
// includes CUDA
#include <cuda_runtime.h>
#define MAX_THREADS_PER_BLOCK 256
#define THREADS_PER_BLOCK 256
#define LINE_SIZE 128
#define SETS 4
#define ASSOC 24
#define SIMD_WIDTH 32
// Variables
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
#define ITERATIONS 30
texture<float,1,cudaReadModeElementType> texmem1;
texture<float,1,cudaReadModeElementType> texmem2;
texture<float,1,cudaReadModeElementType> texmem3;
texture<float,1,cudaReadModeElementType> texmem4;
texture<float,1,cudaReadModeElementType> texmem5;
texture<float,1,cudaReadModeElementType> texmem6;
texture<float,1,cudaReadModeElementType> texmem7;
texture<float,1,cudaReadModeElementType> texmem9;
texture<float,1,cudaReadModeElementType> texmem8;
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
__constant__ float ConstArray3[THREADS_PER_BLOCK];
__constant__ float ConstArray4[THREADS_PER_BLOCK];
__constant__ float ConstArray5[THREADS_PER_BLOCK];
__constant__ float ConstArray6[THREADS_PER_BLOCK];
__constant__ float ConstArray7[THREADS_PER_BLOCK];
__constant__ float ConstArray8[THREADS_PER_BLOCK];
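// Memory-stress kernel: each thread repeatedly reads from texture, constant and shared memory and scatters the values to out at indices tid, 2*tid, ..., 9*tid.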
__global__ void tex_bm_kernel( float* out, unsigned size)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
__shared__ float I1[THREADS_PER_BLOCK];
__shared__ float I2[THREADS_PER_BLOCK];
__shared__ float I3[THREADS_PER_BLOCK];
__shared__ float I4[THREADS_PER_BLOCK];
__shared__ float I5[THREADS_PER_BLOCK];
__shared__ float I6[THREADS_PER_BLOCK];
__shared__ float I7[THREADS_PER_BLOCK];
__shared__ float I8[THREADS_PER_BLOCK];
I1[tid%THREADS_PER_BLOCK] = tid;
I2[tid%THREADS_PER_BLOCK] = tid/2;
I3[tid%THREADS_PER_BLOCK] = 2*tid;
I4[tid%THREADS_PER_BLOCK] = tid+2;
I5[tid%THREADS_PER_BLOCK] = 5*tid;
I6[tid%THREADS_PER_BLOCK] = tid/2;
I7[tid%THREADS_PER_BLOCK] = tid*10;
I8[tid%THREADS_PER_BLOCK] = tid/2;
if(tid < size){
for(unsigned i=0; i<ITERATIONS; ++i){
out[tid] = tex1Dfetch(texmem1,tid);
out[tid*2] = ConstArray1[(tid+i)%THREADS_PER_BLOCK];
out[tid*3] = I1[(tid+i)%THREADS_PER_BLOCK];
out[tid*4] = tex1Dfetch(texmem4,tid);
out[tid*5] = ConstArray2[(tid+i)%THREADS_PER_BLOCK];
out[tid*6] = I3[tid%THREADS_PER_BLOCK];
out[tid*7] = tex1Dfetch(texmem7,tid);
out[tid*8] = ConstArray3[(tid+i)%THREADS_PER_BLOCK];
out[tid*9] =tex1Dfetch(texmem9,tid);
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
// Seed the RNG once up front: calling srand(time(0)) inside the loops resets the
// sequence every iteration, and rand() / RAND_MAX is integer division (almost
// always 0), so the quotient is taken in floating point instead.
srand(time(0));
float array1[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array1[i] = (float)rand() / RAND_MAX;
}
float array2[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array2[i] = (float)rand() / RAND_MAX;
}
float array3[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array3[i] = (float)rand() / RAND_MAX;
}
float array4[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array4[i] = (float)rand() / RAND_MAX;
}
float array5[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array5[i] = (float)rand() / RAND_MAX;
}
float array6[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array6[i] = (float)rand() / RAND_MAX;
}
float array7[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array7[i] = (float)rand() / RAND_MAX;
}
float array8[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array8[i] = (float)rand() / RAND_MAX;
}
// Pass the symbols themselves; string symbol names were removed from the CUDA runtime API.
cudaMemcpyToSymbol(ConstArray1, array1, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol(ConstArray2, array2, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol(ConstArray3, array3, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol(ConstArray4, array4, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol(ConstArray5, array5, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol(ConstArray6, array6, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol(ConstArray7, array7, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol(ConstArray8, array8, sizeof(float) * THREADS_PER_BLOCK );
int texmem_size = LINE_SIZE*SETS*ASSOC;
float *host_texture1 = (float*) malloc(texmem_size*sizeof(float));
for (int i=0; i< texmem_size; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
float *device_texture5;
float *device_texture6;
float *device_texture7;
float *device_texture8;
float *device_texture9;
float *host_out = (float*) malloc(texmem_size*sizeof(float)*10);
float *device_out;
// Allocation sizes are in bytes; texmem_size counts floats.
cudaMalloc((void**) &device_texture1, texmem_size*sizeof(float));
cudaMalloc((void**) &device_texture2, texmem_size*sizeof(float));
cudaMalloc((void**) &device_texture3, texmem_size*sizeof(float));
cudaMalloc((void**) &device_texture4, texmem_size*sizeof(float));
cudaMalloc((void**) &device_texture5, texmem_size*sizeof(float));
cudaMalloc((void**) &device_texture6, texmem_size*sizeof(float));
cudaMalloc((void**) &device_texture7, texmem_size*sizeof(float));
cudaMalloc((void**) &device_texture8, texmem_size*sizeof(float));
cudaMalloc((void**) &device_texture9, texmem_size*sizeof(float));
cudaMalloc((void**) &device_out, texmem_size*10*sizeof(float));
cudaMemcpy(device_texture1, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture2, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture3, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture4, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture5, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture6, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture7, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture8, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture9, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
// Bind the full buffers; the size argument is in bytes.
cudaBindTexture(0, texmem1, device_texture1, texmem_size*sizeof(float));
cudaBindTexture(0, texmem2, device_texture2, texmem_size*sizeof(float));
cudaBindTexture(0, texmem3, device_texture3, texmem_size*sizeof(float));
cudaBindTexture(0, texmem4, device_texture4, texmem_size*sizeof(float));
cudaBindTexture(0, texmem5, device_texture5, texmem_size*sizeof(float));
cudaBindTexture(0, texmem6, device_texture6, texmem_size*sizeof(float));
cudaBindTexture(0, texmem7, device_texture7, texmem_size*sizeof(float));
cudaBindTexture(0, texmem8, device_texture8, texmem_size*sizeof(float));
cudaBindTexture(0, texmem9, device_texture9, texmem_size*sizeof(float));
unsigned num_blocks = (texmem_size / MAX_THREADS_PER_BLOCK) + 1;
dim3 grid( num_blocks, 1, 1);
dim3 threads( MAX_THREADS_PER_BLOCK, 1, 1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
tex_bm_kernel<<< grid, threads, 0 >>>(device_out, texmem_size);
cudaDeviceSynchronize();
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
printf("Kernel DONE, probably correctly\n");
cudaMemcpy(host_out, device_out, texmem_size*sizeof(float), cudaMemcpyDeviceToHost);
/*
printf("Output: ");
float error = false;
for (int i=0; i< texmem_size; i++){
printf("%.1f ", host_out[i]);
if (host_out[i] - i > 0.0001) error = true;
}
printf("\n");
if (error) printf("\nFAILED\n");
else printf("\nPASSED\n");
*/
}
void CleanupResources(void){
// Free device memory
}
// Allocates an array with random float entries.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
|
1ff3fb96a6b4e71a41612d8585011097916faa8e.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "../../IndexFlat.h"
#include "../../utils.h"
#include "../GpuIndexFlat.h"
#include "IndexWrapper.h"
#include "../test/TestUtils.h"
#include "../utils/DeviceTensor.cuh"
#include "../utils/DeviceUtils.h"
#include "../utils/HostTensor.cuh"
#include "../utils/Timer.h"
#include <gflags/gflags.h>
#include <map>
#include <memory>
#include <vector>
#include <hip/hip_runtime_api.h>
DEFINE_bool(l2, true, "L2 or inner product");
DEFINE_int32(k, 3, "final number of closest results returned");
DEFINE_int32(num, 128, "# of vecs");
DEFINE_int32(dim, 128, "# of dimensions");
DEFINE_int32(num_queries, 3, "number of query vectors");
DEFINE_bool(diff, true, "show exact distance + index output discrepancies");
DEFINE_bool(use_float16, false, "use encodings in float16");
DEFINE_bool(use_float16_math, false, "perform math in float16");
DEFINE_bool(transposed, false, "store vectors transposed");
DEFINE_int64(seed, -1, "specify random seed");
DEFINE_int32(num_gpus, 1, "number of gpus to use");
DEFINE_int64(pinned_mem, 0, "pinned memory allocation to use");
DEFINE_bool(cpu, true, "run the CPU code for timing and comparison");
DEFINE_bool(use_unified_mem, false, "use Pascal unified memory for the index");
using namespace faiss::gpu;
int main(int argc, char** argv) {
gflags::ParseCommandLineFlags(&argc, &argv, true);
hipProfilerStop();
auto seed = FLAGS_seed != -1L ? FLAGS_seed : time(nullptr);
printf("using seed %ld\n", seed);
auto numQueries = FLAGS_num_queries;
auto index = std::unique_ptr<faiss::IndexFlat>(
new faiss::IndexFlat(FLAGS_dim, FLAGS_l2 ?
faiss::METRIC_L2 : faiss::METRIC_INNER_PRODUCT));
HostTensor<float, 2, true> vecs({FLAGS_num, FLAGS_dim});
faiss::float_rand(vecs.data(), vecs.numElements(), seed);
index->add(FLAGS_num, vecs.data());
printf("Database: dim %d num vecs %d\n", FLAGS_dim, FLAGS_num);
printf("%s lookup: %d queries, total k %d\n",
FLAGS_l2 ? "L2" : "IP",
numQueries, FLAGS_k);
printf("float16 encoding %s\n", FLAGS_use_float16 ? "enabled" : "disabled");
printf("transposed storage %s\n", FLAGS_transposed ? "enabled" : "disabled");
// Convert to GPU index
printf("Copying index to %d GPU(s)...\n", FLAGS_num_gpus);
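// Per-device factory: clones the CPU IndexFlat into a GpuIndexFlat configured from the command-line flags (float16 storage/math, transposed layout, unified memory).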
auto initFn = [&index](faiss::gpu::GpuResources* res, int dev) ->
std::unique_ptr<faiss::gpu::GpuIndexFlat> {
((faiss::gpu::StandardGpuResources*) res)->setPinnedMemory(
FLAGS_pinned_mem);
GpuIndexFlatConfig config;
config.device = dev;
config.useFloat16 = FLAGS_use_float16;
config.useFloat16Accumulator = FLAGS_use_float16_math;
config.storeTransposed = FLAGS_transposed;
config.memorySpace = FLAGS_use_unified_mem ?
MemorySpace::Unified : MemorySpace::Device;
auto p = std::unique_ptr<faiss::gpu::GpuIndexFlat>(
new faiss::gpu::GpuIndexFlat(res, index.get(), config));
return p;
};
IndexWrapper<faiss::gpu::GpuIndexFlat> gpuIndex(FLAGS_num_gpus, initFn);
printf("copy done\n");
// Build query vectors
HostTensor<float, 2, true> cpuQuery({numQueries, FLAGS_dim});
faiss::float_rand(cpuQuery.data(), cpuQuery.numElements(), seed);
// Time faiss CPU
HostTensor<float, 2, true> cpuDistances({numQueries, FLAGS_k});
HostTensor<faiss::Index::idx_t, 2, true> cpuIndices({numQueries, FLAGS_k});
if (FLAGS_cpu) {
float cpuTime = 0.0f;
CpuTimer timer;
index->search(numQueries,
cpuQuery.data(),
FLAGS_k,
cpuDistances.data(),
cpuIndices.data());
cpuTime = timer.elapsedMilliseconds();
printf("CPU time %.3f ms\n", cpuTime);
}
HostTensor<float, 2, true> gpuDistances({numQueries, FLAGS_k});
HostTensor<faiss::Index::idx_t, 2, true> gpuIndices({numQueries, FLAGS_k});
CUDA_VERIFY(hipProfilerStart());
faiss::gpu::synchronizeAllDevices();
float gpuTime = 0.0f;
// Time GPU
{
CpuTimer timer;
gpuIndex.getIndex()->search(cpuQuery.getSize(0),
cpuQuery.data(),
FLAGS_k,
gpuDistances.data(),
gpuIndices.data());
// There is a device -> host copy above, so no need to time
// additional synchronization with the GPU
gpuTime = timer.elapsedMilliseconds();
}
CUDA_VERIFY(hipProfilerStop());
printf("GPU time %.3f ms\n", gpuTime);
if (FLAGS_cpu) {
compareLists(cpuDistances.data(), cpuIndices.data(),
gpuDistances.data(), gpuIndices.data(),
numQueries, FLAGS_k,
"", true, FLAGS_diff, false);
}
CUDA_VERIFY(hipDeviceSynchronize());
return 0;
}
| 1ff3fb96a6b4e71a41612d8585011097916faa8e.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "../../IndexFlat.h"
#include "../../utils.h"
#include "../GpuIndexFlat.h"
#include "IndexWrapper.h"
#include "../test/TestUtils.h"
#include "../utils/DeviceTensor.cuh"
#include "../utils/DeviceUtils.h"
#include "../utils/HostTensor.cuh"
#include "../utils/Timer.h"
#include <gflags/gflags.h>
#include <map>
#include <memory>
#include <vector>
#include <cuda_profiler_api.h>
DEFINE_bool(l2, true, "L2 or inner product");
DEFINE_int32(k, 3, "final number of closest results returned");
DEFINE_int32(num, 128, "# of vecs");
DEFINE_int32(dim, 128, "# of dimensions");
DEFINE_int32(num_queries, 3, "number of query vectors");
DEFINE_bool(diff, true, "show exact distance + index output discrepancies");
DEFINE_bool(use_float16, false, "use encodings in float16");
DEFINE_bool(use_float16_math, false, "perform math in float16");
DEFINE_bool(transposed, false, "store vectors transposed");
DEFINE_int64(seed, -1, "specify random seed");
DEFINE_int32(num_gpus, 1, "number of gpus to use");
DEFINE_int64(pinned_mem, 0, "pinned memory allocation to use");
DEFINE_bool(cpu, true, "run the CPU code for timing and comparison");
DEFINE_bool(use_unified_mem, false, "use Pascal unified memory for the index");
using namespace faiss::gpu;
int main(int argc, char** argv) {
gflags::ParseCommandLineFlags(&argc, &argv, true);
cudaProfilerStop();
auto seed = FLAGS_seed != -1L ? FLAGS_seed : time(nullptr);
printf("using seed %ld\n", seed);
auto numQueries = FLAGS_num_queries;
auto index = std::unique_ptr<faiss::IndexFlat>(
new faiss::IndexFlat(FLAGS_dim, FLAGS_l2 ?
faiss::METRIC_L2 : faiss::METRIC_INNER_PRODUCT));
HostTensor<float, 2, true> vecs({FLAGS_num, FLAGS_dim});
faiss::float_rand(vecs.data(), vecs.numElements(), seed);
index->add(FLAGS_num, vecs.data());
printf("Database: dim %d num vecs %d\n", FLAGS_dim, FLAGS_num);
printf("%s lookup: %d queries, total k %d\n",
FLAGS_l2 ? "L2" : "IP",
numQueries, FLAGS_k);
printf("float16 encoding %s\n", FLAGS_use_float16 ? "enabled" : "disabled");
printf("transposed storage %s\n", FLAGS_transposed ? "enabled" : "disabled");
// Convert to GPU index
printf("Copying index to %d GPU(s)...\n", FLAGS_num_gpus);
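// Per-device factory: clones the CPU IndexFlat into a GpuIndexFlat configured from the command-line flags (float16 storage/math, transposed layout, unified memory).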
auto initFn = [&index](faiss::gpu::GpuResources* res, int dev) ->
std::unique_ptr<faiss::gpu::GpuIndexFlat> {
((faiss::gpu::StandardGpuResources*) res)->setPinnedMemory(
FLAGS_pinned_mem);
GpuIndexFlatConfig config;
config.device = dev;
config.useFloat16 = FLAGS_use_float16;
config.useFloat16Accumulator = FLAGS_use_float16_math;
config.storeTransposed = FLAGS_transposed;
config.memorySpace = FLAGS_use_unified_mem ?
MemorySpace::Unified : MemorySpace::Device;
auto p = std::unique_ptr<faiss::gpu::GpuIndexFlat>(
new faiss::gpu::GpuIndexFlat(res, index.get(), config));
return p;
};
IndexWrapper<faiss::gpu::GpuIndexFlat> gpuIndex(FLAGS_num_gpus, initFn);
printf("copy done\n");
// Build query vectors
HostTensor<float, 2, true> cpuQuery({numQueries, FLAGS_dim});
faiss::float_rand(cpuQuery.data(), cpuQuery.numElements(), seed);
// Time faiss CPU
HostTensor<float, 2, true> cpuDistances({numQueries, FLAGS_k});
HostTensor<faiss::Index::idx_t, 2, true> cpuIndices({numQueries, FLAGS_k});
if (FLAGS_cpu) {
float cpuTime = 0.0f;
CpuTimer timer;
index->search(numQueries,
cpuQuery.data(),
FLAGS_k,
cpuDistances.data(),
cpuIndices.data());
cpuTime = timer.elapsedMilliseconds();
printf("CPU time %.3f ms\n", cpuTime);
}
HostTensor<float, 2, true> gpuDistances({numQueries, FLAGS_k});
HostTensor<faiss::Index::idx_t, 2, true> gpuIndices({numQueries, FLAGS_k});
CUDA_VERIFY(cudaProfilerStart());
faiss::gpu::synchronizeAllDevices();
float gpuTime = 0.0f;
// Time GPU
{
CpuTimer timer;
gpuIndex.getIndex()->search(cpuQuery.getSize(0),
cpuQuery.data(),
FLAGS_k,
gpuDistances.data(),
gpuIndices.data());
// There is a device -> host copy above, so no need to time
// additional synchronization with the GPU
gpuTime = timer.elapsedMilliseconds();
}
CUDA_VERIFY(cudaProfilerStop());
printf("GPU time %.3f ms\n", gpuTime);
if (FLAGS_cpu) {
compareLists(cpuDistances.data(), cpuIndices.data(),
gpuDistances.data(), gpuIndices.data(),
numQueries, FLAGS_k,
"", true, FLAGS_diff, false);
}
CUDA_VERIFY(cudaDeviceSynchronize());
return 0;
}
|
96c8cee2816608d0c383744777aed486d0a3c7d6.hip | // !!! This is a file automatically generated by hipify!!!
/*=================================================================
* cmd_nnsearch2_cuda.cpp - nearest-neighbor search (k=2)
*
* Input:
* filename of x : m x k matrix; lists of k-dimensional vectors
* filename of y : n x k matrix; lists of k-dimensional vectors
*
* Output:
* r : m x 2 matrix; lists of 2 tops of minimums
* idx : m x 2 matrix; lists of 2 indices of the 2 tops which show along the n-direction
*
*=================================================================*/
#include <iostream>
#include <fstream>
#include <vector>
#include <cmath>
#include "spdlog/spdlog.h"
#include "nnsearch2.h"
#include "gpudevice.h"
#include "cuda_task_executor.h"
int main(int argc, char* argv[]) {
if (argc < 4) {
std::cout << "Usage: " << argv[0] << " [in file1] [in file2] [out file]" << std::endl;
return 1;
}
std::shared_ptr<spdlog::logger> logger;
try {
spdlog::set_async_mode(4096, spdlog::async_overflow_policy::block_retry, nullptr, std::chrono::seconds(2));
spdlog::set_level(spdlog::level::trace);
logger = spdlog::get("mex_logger");
if (logger == nullptr) {
logger = spdlog::basic_logger_mt("mex_logger", "logs/mex.log");
}
logger->flush_on(spdlog::level::err);
//logger->flush_on(spdlog::level::info);
} catch (const spdlog::spdlog_ex& ex) {
std::cout << "Log initialization failed: " << ex.what() << std::endl;
return 1;
}
try {
logger->info("{:=>50}", " nnsearch2_cuda start");
std::string in_filename1(argv[1]);
std::string in_filename2(argv[2]);
std::string out_filename(argv[3]);
unsigned int m, n, k, k1;
std::ifstream fin1(in_filename1, std::ios::binary);
fin1.read((char*)&m, sizeof(unsigned int));
fin1.read((char*)&k, sizeof(unsigned int));
std::ifstream fin2(in_filename2, std::ios::binary);
fin2.read((char*)&n, sizeof(unsigned int));
fin2.read((char*)&k1, sizeof(unsigned int));
if (k != k1) {
logger->error("k size of data1 and data2 is not the same.");
fin1.close();
fin2.close();
return 1;
}
std::vector<double> in_data1(m * k);
std::vector<double> in_data2(n * k);
fin1.read((char*)in_data1.data(), m * k * sizeof(double));
fin2.read((char*)in_data2.data(), n * k * sizeof(double));
fin1.close();
fin2.close();
int num_gpus = cudautils::get_gpu_num();
logger->info("# of gpus={}", num_gpus);
std::vector<double> outMinsVal(2 * m);
std::vector<unsigned int> outMinsIdx(2 * m);
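// Tile the search: dm = 1000 query vectors per task, and the n database vectors are split into blocks of at most ~50000, with the block size dn rounded up to a multiple of 1000.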
float round_up_num = 1000.0;
unsigned int dm = 1000;
unsigned int num_dn_blocks = cudautils::get_num_blocks(n, 50000);
unsigned int dn = (n <= 50000) ? n : (unsigned int)(::ceil(float(n) / float(num_dn_blocks) / round_up_num) * round_up_num);
int num_streams = 10;
logger->info("m={},n={},dm={},dn={},# of dn blocks={},# of streams={}", m, n, dm, dn, num_dn_blocks, num_streams);
//TODO check the max of GPU memory usage!
try {
std::shared_ptr<cudautils::NearestNeighborSearch> nns =
std::make_shared<cudautils::NearestNeighborSearch>(m, n, k, dm, dn, num_gpus, num_streams);
cudautils::CudaTaskExecutor executor(num_gpus, num_streams, nns);
nns->setInput(in_data1,in_data2);
executor.run();
nns->getResult(outMinsVal, outMinsIdx);
unsigned int num_2 = 2;
std::ofstream fout(out_filename, std::ios::binary);
fout.write((char*)&m, sizeof(unsigned int));
fout.write((char*)&num_2, sizeof(unsigned int));
fout.write((char*)outMinsVal.data(), num_2 * m * sizeof(double));
fout.write((char*)outMinsIdx.data(), num_2 * m * sizeof(unsigned int));
fout.close();
} catch (...) {
logger->error("internal unknown error occurred");
}
logger->info("{:=>50}", " nnsearch2_cuda end");
logger->flush();
spdlog::drop_all();
} catch (...) {
logger->flush();
throw;
}
return 0;
}
| 96c8cee2816608d0c383744777aed486d0a3c7d6.cu | /*=================================================================
* cmd_nnsearch2_cuda.cpp - nearest-neighbor search (k=2)
*
* Input:
* filename of x : m x k matrix; lists of k-dimensional vectors
* filename of y : n x k matrix; lists of k-dimensional vectors
*
* Output:
* r : m x 2 matrix; lists of 2 tops of minimums
* idx : m x 2 matrix; lists of 2 indices of the 2 tops which show along the n-direction
*
*=================================================================*/
#include <iostream>
#include <fstream>
#include <vector>
#include <cmath>
#include "spdlog/spdlog.h"
#include "nnsearch2.h"
#include "gpudevice.h"
#include "cuda_task_executor.h"
int main(int argc, char* argv[]) {
if (argc < 4) {
std::cout << "Usage: " << argv[0] << " [in file1] [in file2] [out file]" << std::endl;
return 1;
}
std::shared_ptr<spdlog::logger> logger;
try {
spdlog::set_async_mode(4096, spdlog::async_overflow_policy::block_retry, nullptr, std::chrono::seconds(2));
spdlog::set_level(spdlog::level::trace);
logger = spdlog::get("mex_logger");
if (logger == nullptr) {
logger = spdlog::basic_logger_mt("mex_logger", "logs/mex.log");
}
logger->flush_on(spdlog::level::err);
//logger->flush_on(spdlog::level::info);
} catch (const spdlog::spdlog_ex& ex) {
std::cout << "Log initialization failed: " << ex.what() << std::endl;
return 1;
}
try {
logger->info("{:=>50}", " nnsearch2_cuda start");
std::string in_filename1(argv[1]);
std::string in_filename2(argv[2]);
std::string out_filename(argv[3]);
unsigned int m, n, k, k1;
std::ifstream fin1(in_filename1, std::ios::binary);
fin1.read((char*)&m, sizeof(unsigned int));
fin1.read((char*)&k, sizeof(unsigned int));
std::ifstream fin2(in_filename2, std::ios::binary);
fin2.read((char*)&n, sizeof(unsigned int));
fin2.read((char*)&k1, sizeof(unsigned int));
if (k != k1) {
logger->error("k size of data1 and data2 is not the same.");
fin1.close();
fin2.close();
return 1;
}
std::vector<double> in_data1(m * k);
std::vector<double> in_data2(n * k);
fin1.read((char*)in_data1.data(), m * k * sizeof(double));
fin2.read((char*)in_data2.data(), n * k * sizeof(double));
fin1.close();
fin2.close();
int num_gpus = cudautils::get_gpu_num();
logger->info("# of gpus={}", num_gpus);
std::vector<double> outMinsVal(2 * m);
std::vector<unsigned int> outMinsIdx(2 * m);
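// Tile the search: dm = 1000 query vectors per task, and the n database vectors are split into blocks of at most ~50000, with the block size dn rounded up to a multiple of 1000.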
float round_up_num = 1000.0;
unsigned int dm = 1000;
unsigned int num_dn_blocks = cudautils::get_num_blocks(n, 50000);
unsigned int dn = (n <= 50000) ? n : (unsigned int)(std::ceil(float(n) / float(num_dn_blocks) / round_up_num) * round_up_num);
int num_streams = 10;
logger->info("m={},n={},dm={},dn={},# of dn blocks={},# of streams={}", m, n, dm, dn, num_dn_blocks, num_streams);
//TODO check the max of GPU memory usage!
try {
std::shared_ptr<cudautils::NearestNeighborSearch> nns =
std::make_shared<cudautils::NearestNeighborSearch>(m, n, k, dm, dn, num_gpus, num_streams);
cudautils::CudaTaskExecutor executor(num_gpus, num_streams, nns);
nns->setInput(in_data1,in_data2);
executor.run();
nns->getResult(outMinsVal, outMinsIdx);
unsigned int num_2 = 2;
std::ofstream fout(out_filename, std::ios::binary);
fout.write((char*)&m, sizeof(unsigned int));
fout.write((char*)&num_2, sizeof(unsigned int));
fout.write((char*)outMinsVal.data(), num_2 * m * sizeof(double));
fout.write((char*)outMinsIdx.data(), num_2 * m * sizeof(unsigned int));
fout.close();
} catch (...) {
logger->error("internal unknown error occurred");
}
logger->info("{:=>50}", " nnsearch2_cuda end");
logger->flush();
spdlog::drop_all();
} catch (...) {
logger->flush();
throw;
}
return 0;
}
|
0da6ea0e91f7d1ddc061bb99f9b6689361a6590b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "calculate.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *arr_in = NULL;
hipMalloc(&arr_in, XSIZE*YSIZE*sizeof(int)); // allocate in bytes, not element counts
int *arr_out = NULL;
hipMalloc(&arr_out, XSIZE*YSIZE*sizeof(int));
int sz = 1;
int option = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(calculate, dim3(gridBlock), dim3(threadBlock), 0, 0, arr_in, arr_out, sz, option);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(calculate, dim3(gridBlock), dim3(threadBlock), 0, 0, arr_in, arr_out, sz, option);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(calculate, dim3(gridBlock), dim3(threadBlock), 0, 0, arr_in, arr_out, sz, option);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0da6ea0e91f7d1ddc061bb99f9b6689361a6590b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "calculate.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *arr_in = NULL;
cudaMalloc(&arr_in, XSIZE*YSIZE*sizeof(int)); // allocate in bytes, not element counts
int *arr_out = NULL;
cudaMalloc(&arr_out, XSIZE*YSIZE*sizeof(int));
int sz = 1;
int option = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
calculate<<<gridBlock,threadBlock>>>(arr_in,arr_out,sz,option);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
calculate<<<gridBlock,threadBlock>>>(arr_in,arr_out,sz,option);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
calculate<<<gridBlock,threadBlock>>>(arr_in,arr_out,sz,option);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2e2013bc73e5da545a25cfadb52e9775d7d6d1d5.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <stdlib.h>
#include <stdio.h>
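// Thin wrappers around hipMalloc/hipMemcpy/hipFree that print the runtime error string when a call fails.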
void handleCudaMalloc(void **var, ssize_t size) {
hipError_t status;
status = hipMalloc(var, size);
if (status != hipSuccess) {
printf("%s\n", hipGetErrorString(status));
}
}
void handleCudaMemcpy(void* dst, const void* src, ssize_t size, hipMemcpyKind kind) {
hipError_t status;
status = hipMemcpy(dst, src, size, kind);
if (status != hipSuccess) {
printf("%s\n", hipGetErrorString(status));
}
}
void handleCudaFree(void* pointer) {
hipError_t status;
status = hipFree(pointer);
if (status != hipSuccess) {
printf("%s\n", hipGetErrorString(status));
}
} | 2e2013bc73e5da545a25cfadb52e9775d7d6d1d5.cu | #include <cuda_runtime_api.h>
#include <stdlib.h>
#include <stdio.h>
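// Thin wrappers around cudaMalloc/cudaMemcpy/cudaFree that print the runtime error string when a call fails.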
void handleCudaMalloc(void **var, ssize_t size) {
cudaError_t status;
status = cudaMalloc(var, size);
if (status != cudaSuccess) {
printf("%s\n", cudaGetErrorString(status));
}
}
void handleCudaMemcpy(void* dst, const void* src, ssize_t size, cudaMemcpyKind kind) {
cudaError_t status;
status = cudaMemcpy(dst, src, size, kind);
if (status != cudaSuccess) {
printf("%s\n", cudaGetErrorString(status));
}
}
void handleCudaFree(void* pointer) {
cudaError_t status;
status = cudaFree(pointer);
if (status != cudaSuccess) {
printf("%s\n", cudaGetErrorString(status));
}
} |
70110125c1aa6a5b105852c794cd4ec597b1d82b.hip | // !!! This is a file automatically generated by hipify!!!
#include "BMP.h"
#include <stdio.h>
#include <cassert>
#include <stdlib.h>
#include <string.h>
#include <cmath>
#include <ctime>
#include <string>
#include <sstream>
#include <iostream>
#include <algorithm>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//#include <hip/hip_runtime.h>
//#include <hip/device_functions.h>
//#include <hip/hip_runtime_api.h>
const int D = 28 * 28;
const int CLUSTERS = 14;
const float threshold = 0.0;
const float eps = 1e-6;
const int tile = 1024;
const int clustersInShared = 14;
typedef unsigned char uchar;
using namespace std;
void read4(int * x, FILE * input) {
uchar temp;
*x = 0;
for (int i = 0; i < 4; ++i) {
fread(&temp, 1, 1, input);
*x |= (temp << ((4 - i - 1) * 8));
}
}
void read(int * nObj, float ** obj, uchar ** membership, string filename = "") {
FILE * input;
assert(input = fopen((filename + "images").c_str(), "rb"));
int magic, row, column;
read4(&magic, input);
read4(nObj, input);
read4(&row, input);
read4(&column, input);
//assert(*nObj == 60000);
assert(row == 28);
assert(column == 28);
int size = (*nObj) * D;
printf(" Number of objects = %d\n row = %d\n column = %d\n", *nObj, row, column);
uchar * charObj;
void * temp = malloc(size); assert(temp);
charObj = (uchar *) temp;
//printf("size = %d\n", size);
assert(fread(charObj, 1, size, input) == size);
*obj = new float[size];
for (int i = 0; i < size; ++i){
(*obj)[i] = charObj[i];
}
for (int i = 0; i < *nObj; ++i) {
for (int row = 0; row < 28 / 2; ++row) {
for (int col = 0; col < 28; ++col) {
swap((*obj)[i * D + row * 28 + col], (*obj)[i * D + (28 - row - 1) * 28 + col]);
}
}
}
free(charObj);
fclose(input);
assert(input = fopen((filename + "labels").c_str(), "rb"));
read4(&magic, input);
read4(nObj, input);
//assert(*nObj == 60000);
*membership = new uchar[*nObj];
fread(*membership, *nObj, 1, input);
//for (int i = 0; i < 10; ++i)
// printf("label of the %d-th = %u\n", i, (*membership)[i]);
fclose(input);
puts("READ SUCCESSFUL");
}
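// Squared Euclidean distance between two D-dimensional vectors; the sqrt is skipped since only comparisons are needed.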
__host__ __device__ inline float dist(float * v1, float * v2) {
float res = 0;
for (int i = 0; i < D; ++i) {
//if (v2[i] < v1[i]) res += (v1[i] - v2[i]) * (v1[i] - v2[i]);
res += (*(v2 + i) - *(v1 + i)) * (*(v2 + i) - *(v1 + i));
}
//printf("%.5f\n", sqrt(res));
return res;
}
void print(FILE * file, float * v) {
for (int i = 0; i < 28; ++i) {
for (int j = 0; j < 28; ++j) {
fprintf(file, "%0.f ", v[i * 28 + j]);
}
fprintf(file, "\n");
}
}
void writeBMP(float * img, string filename) {
FILE * tar = fopen(filename.c_str(), "wb");
char temp = 'B';
fwrite(&temp, 1, 1, tar);
temp = 'M';
fwrite(&temp, 1, 1, tar);
// 24bpp 28x28 image; each 28*3-byte row is already 4-byte aligned, so no padding is needed.
int sizeBMP = 28 * 28 * 3 + 2 + 4 + 4 + 4 + sizeof(DIBHeader);
fwrite(&sizeBMP, 4, 1, tar);
int reserved = 0;
fwrite(&reserved, 4, 1, tar);
int offset = 2 + 4 + 4 + 4 + sizeof(DIBHeader);
fwrite(&offset, 4, 1, tar);
DIBHeader dib(28, 28);
fwrite(&dib, sizeof(DIBHeader), 1, tar);
for (int i = 0; i < 28; ++i) {
for (int j = 0; j < 28; ++j) {
unsigned char x = img[i * 28 + j] + 0.5;
for (int k = 0; k < 3; ++k) {
fwrite(&x, 1, 1, tar);
}
}
}
fclose(tar);
}
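// CPU baseline k-means: alternate assignment (nearest cluster per image) and update (recompute cluster means) until the fraction of changed memberships drops to threshold or 500 iterations.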
void sequential(float * obj, int nObj, float * clusters, uchar * membership) {
int alpha;
int loops = 0;
float * sum = new float[CLUSTERS * D];
int * members = new int[CLUSTERS];
do{
for (int i = 0; i < CLUSTERS; ++i) {
members[i] = 0;
for (int k = 0; k < D; ++k) {
sum[i * D + k] = 0;
}
}
alpha = 0;
for (int i = 0; i < nObj; ++i) {
int minid = membership[i];
float mind = dist(obj + i * D, clusters + minid * D);
for (int j = 0; j < CLUSTERS; ++j) {
float x = dist(obj + i * D, clusters + j * D);
if (x - mind < -eps) {
mind = x;
minid = j;
}
}
if (membership[i] != minid) {
membership[i] = minid;
alpha++;
}
members[membership[i]] ++;
for (int k = 0; k < D; ++k) {
sum[membership[i] * D + k] += obj[i * D + k];
}
}
for (int i = 0; i < CLUSTERS; ++i) {
for (int k = 0; k < D; ++k) {
clusters[i * D + k] = sum[i * D + k] * 1. / members[i];
}
}
++loops;
printf("%d -> %.5f\n", loops, 1. * alpha / nObj);
} while (1. * alpha / nObj > threshold && loops < 500);
}
void check(uchar * actual, uchar * proposed, float * clusters, int nObj, int nTest, float *testObj, uchar * testActual) {
int * count = new int[CLUSTERS * CLUSTERS];
int * represents = new int[CLUSTERS];
memset(count, 0, CLUSTERS * CLUSTERS * sizeof(int));
for (int i = 0; i < nObj; ++i) {
count[proposed[i] * CLUSTERS + actual[i]]++;
}
for (int i = 0; i < CLUSTERS; ++i) {
int mx = -1;
for (int j = 0; j < CLUSTERS; ++j) {
if (mx == -1 || count[i * CLUSTERS + j] > count[i * CLUSTERS + mx]) {
mx = j;
}
}
represents[i] = mx;
}
/*
for (int i = 0; i < CLUSTERS; ++i) {
for (int j = 0; j < CLUSTERS; ++j) {
printf("%d\t", count[i * CLUSTERS + j]);
}
puts("");
}
*/
for (int i = 0; i < CLUSTERS; ++i) {
cout << i << " represents " << represents[i] << endl;
}
int wrong = 0;
for (int i = 0; i < nObj; ++i) {
wrong += actual[i] != represents[proposed[i]];
}
puts("----- On training -----");
printf("wrong %d out of %d\n", wrong, nObj);
printf("in percentage %.2f\n", 1. * wrong / nObj);
puts("----- On test ----");
wrong = 0;
for (int i = 0; i < nTest; ++i) {
int mycluster = -1;
float mind;
for (int j = 0; j < CLUSTERS; ++j) {
if (mycluster == -1 || dist(clusters + j * D, testObj + i * D) < mind) {
mind = dist(clusters + j * D, testObj + i * D);
mycluster = j;
}
}
wrong += testActual[i] != represents[mycluster];
}
printf("wrong %d out of %d\n", wrong, nTest);
printf("in percentage %.2f\n", 1. * wrong / nTest);
}
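// GPU assignment step: one thread per image, centroids read directly from global memory.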
__global__ void simpleFindCluster(float * obj, int nObj, float * clusters, uchar * membership) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < nObj) {
int x = membership[id];
float mind = dist(obj + id * D, clusters + x * D);
for (int j = 0; j < CLUSTERS; ++j) {
float candd = dist(obj + id * D, clusters + j * D);
if (mind > candd) {
mind = candd;
x = j;
}
}
membership[id] = x;
}
}
void simpleParallel(float * obj, int nObj, float * clusters, uchar * membership) {
int alpha = 0;
int loops = 0;
float * d_obj;
float * d_clusters;
uchar * d_membership, * temp;
temp = new uchar[nObj];
float * sum = new float[CLUSTERS * D];
int * members = new int[CLUSTERS];
hipMalloc((void **)&d_obj, nObj * D * sizeof(float));
hipMalloc((void**)&d_clusters, CLUSTERS * D * sizeof(float));
hipMalloc((void**)&d_membership, nObj);
hipMemcpy(d_obj, obj, nObj * D * sizeof(float), hipMemcpyHostToDevice);
//hipSuccess();
dim3 blocksPerGrid((nObj + tile - 1) / tile);
dim3 threadsPerBlock(tile);
do{
hipMemcpy(d_clusters, clusters, CLUSTERS * D * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_membership, membership, nObj, hipMemcpyHostToDevice);
simpleFindCluster << <blocksPerGrid, threadsPerBlock >> > (d_obj, nObj, d_clusters, d_membership);
hipDeviceSynchronize();
hipMemcpy(temp, d_membership, nObj, hipMemcpyDeviceToHost);
//memset(sum, 0, CLUSTERS * D * sizeof(float));
//memset(members, 0, CLUSTERS * sizeof(int));
for (int i = 0; i < CLUSTERS; ++i) {
members[i] = 0;
for (int j = 0; j < D; ++j) {
sum[i * D + j] = 0.0;
}
}
alpha = 0;
for (int i = 0; i < nObj; ++i) {
alpha += temp[i] != membership[i];
membership[i] = temp[i];
members[membership[i]]++;
for (int j = 0; j < D; ++j) {
sum[membership[i] * D + j] += obj[i * D + j];
}
}
for (int i = 0; i < CLUSTERS; ++i) {
for (int j = 0; j < D; ++j) {
clusters[i * D + j] = sum[i * D + j] / members[i];
}
}
++loops;
printf("%d -> %.5f\n", loops, 1. * alpha / nObj);
} while (1. * alpha / nObj > threshold && loops < 500);
}
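// GPU assignment step with tiling: centroids are staged through shared memory in chunks of clustersInShared, so each block reloads them from global memory only once per chunk.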
__global__ void tiledFindCluster(float * obj, int nObj, float * clusters, uchar * membership) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
float self[D];
for (int i = 0; i < D; ++i) {
self[i] = (id < nObj) ? obj[id * D + i] : 0;
}
__shared__ float sharedClusters[clustersInShared * D];
int x = (id < nObj) ? membership[id] : 0;
float mind = dist(self, clusters + x * D);
float candd;
int N;
for (int k = 0; k < (CLUSTERS - 1) / clustersInShared + 1; ++k) {
N = (CLUSTERS - k * clustersInShared < clustersInShared) ? CLUSTERS - k * clustersInShared : clustersInShared;
for (int i = 0; i < D; ++i) {
if(threadIdx.x < N) sharedClusters[threadIdx.x * D + i] = clusters[(k * clustersInShared + threadIdx.x) * D + i];
}
__syncthreads();
for (int j = 0; j < N; ++j) {
candd = dist(self, sharedClusters + j * D);
/*for (int f = 0; f < D; ++f) {
candd += (self[f] - sharedClusters[j * D + f]) * (self[f] - sharedClusters[j * D + f]);
}*/
if (mind > candd) {
mind = candd;
x = k * clustersInShared + j;
}
}
__syncthreads();
}
if(id < nObj) membership[id] = x;
}
void tiledParallel(float * obj, int nObj, float * clusters, uchar * membership) {
int alpha = 0;
int loops = 0;
float * d_obj;
float * d_clusters;
uchar * d_membership, *temp;
temp = new uchar[nObj];
float * sum = new float[CLUSTERS * D];
int * members = new int[CLUSTERS];
hipMalloc((void **)&d_obj, nObj * D * sizeof(float));
hipMalloc((void**)&d_clusters, CLUSTERS * D * sizeof(float));
hipMalloc((void**)&d_membership, nObj);
hipMemcpy(d_obj, obj, nObj * D * sizeof(float), hipMemcpyHostToDevice);
//hipSuccess();
dim3 blocksPerGrid((nObj + tile - 1) / tile);
dim3 threadsPerBlock(tile);
do{
clock_t start = clock();
hipMemcpy(d_clusters, clusters, CLUSTERS * D * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_membership, membership, nObj, hipMemcpyHostToDevice);
tiledFindCluster << <blocksPerGrid, threadsPerBlock >> > (d_obj, nObj, d_clusters, d_membership);
hipDeviceSynchronize();
hipMemcpy(temp, d_membership, nObj, hipMemcpyDeviceToHost);
//printf(" finding = %.3f s ", (clock() - start) * 1. / CLOCKS_PER_SEC);
start = clock();
//memset(sum, 0, CLUSTERS * D * sizeof(float));
//memset(members, 0, CLUSTERS * sizeof(int));
for (int i = 0; i < CLUSTERS; ++i) {
members[i] = 0;
for (int j = 0; j < D; ++j) {
sum[i * D + j] = 0.0;
}
}
alpha = 0;
for (int i = 0; i < nObj; ++i) {
alpha += temp[i] != membership[i];
membership[i] = temp[i];
members[membership[i]]++;
for (int j = 0; j < D; ++j) {
sum[membership[i] * D + j] += obj[i * D + j];
}
}
for (int i = 0; i < CLUSTERS; ++i) {
for (int j = 0; j < D; ++j) {
clusters[i * D + j] = sum[i * D + j] / members[i];
}
}
//printf("recalc = %.3f s\n", (clock() - start) * 1. / CLOCKS_PER_SEC);
++loops;
printf("membership change rate on %d-th iteration -> %.5f\n", loops, 1. * alpha / nObj);
} while (1. * alpha / nObj > threshold && loops < 500);
}
int main() {
srand(time(NULL));
int nObj;
float * obj;
uchar * actual;
int nTest;
float * testObj;
uchar * testActual;
read(&nObj, &obj, &actual, "");
read(&nTest, &testObj, &testActual, "test-");
//need to assign clusters initially
//TODO add kmeans++ assigning
float * initialClusters = new float[D * CLUSTERS];
int * temp = new int[nObj];
for (int i = 0; i < nObj; ++i) {
temp[i] = i;
}
random_shuffle(temp, temp + nObj);
for (int i = 0; i < CLUSTERS; ++i) {
int from = temp[i];
memcpy(initialClusters + i * D, obj + from * D, D * sizeof(float));
}
clock_t start;
nObj = 60000;
/*----- Sequential--------*/
{
start = clock();
float * clusters = new float[D * CLUSTERS];
memcpy(clusters, initialClusters, D * CLUSTERS * sizeof(float));
uchar * membership = new uchar[nObj];
memset(membership, 0, nObj);
sequential(obj, nObj, clusters, membership);
check(actual, membership, clusters, nObj, nTest, testObj, testActual);
printf("Sequential Total execution time = %.2f\n", (clock() - start) * 1. / CLOCKS_PER_SEC);
}
/*----- End Sequential-----*/
/*------ Simple Parallel ------*/
{
start = clock();
float * clusters = new float[D * CLUSTERS];
memcpy(clusters, initialClusters, D * CLUSTERS * sizeof(float));
uchar * membership = new uchar[nObj];
memset(membership, 0, nObj);
simpleParallel(obj, nObj, clusters, membership);
check(actual, membership, clusters, nObj, nTest, testObj, testActual);
printf("Simple Parallel Total execution time = %.2f\n", (clock() - start) * 1. / CLOCKS_PER_SEC);
}
/*------ End Simple Parallel -----*/
/*------- Tiled Parallel ------*/
{
start = clock();
float * clusters = new float[D * CLUSTERS];
memcpy(clusters, initialClusters, D * CLUSTERS * sizeof(float));
uchar * membership = new uchar[nObj];
memset(membership, 0, nObj);
tiledParallel(obj, nObj, clusters, membership);
check(actual, membership, clusters, nObj, nTest, testObj, testActual);
printf("Tiled Parallel Total execution time = %.2f\n", (clock() - start) * 1. / CLOCKS_PER_SEC);
}
/*------- End Tiled Parallel ------*/
/*
for (int i = 0; i < CLUSTERS; ++i) {
string temp = to_string(i);
temp += ".bmp";
writeBMP(clusters + i * D, temp);
}
*/
/*for (int i = 0; i < 1000; ++i) {
cout << i << " is a member of " << (int)membership[i] << '\n';
}*/
system("PAUSE");
return 0;
}
| 70110125c1aa6a5b105852c794cd4ec597b1d82b.cu | #include "BMP.h"
#include <stdio.h>
#include <cassert>
#include <stdlib.h>
#include <string.h>
#include <cmath>
#include <ctime>
#include <string>
#include <sstream>
#include <iostream>
#include <algorithm>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//#include <cuda.h>
//#include <device_functions.h>
//#include <cuda_runtime_api.h>
const int D = 28 * 28;
const int CLUSTERS = 14;
const float threshold = 0.0;
const float eps = 1e-6;
const int tile = 1024;
const int clustersInShared = 14;
typedef unsigned char uchar;
using namespace std;
void read4(int * x, FILE * input) {
uchar temp;
*x = 0;
for (int i = 0; i < 4; ++i) {
fread(&temp, 1, 1, input);
*x |= (temp << ((4 - i - 1) * 8));
}
}
void read(int * nObj, float ** obj, uchar ** membership, string filename = "") {
FILE * input;
assert(input = fopen((filename + "images").c_str(), "rb"));
int magic, row, column;
read4(&magic, input);
read4(nObj, input);
read4(&row, input);
read4(&column, input);
//assert(*nObj == 60000);
assert(row == 28);
assert(column == 28);
int size = (*nObj) * D;
printf(" Number of objects = %d\n row = %d\n column = %d\n", *nObj, row, column);
uchar * charObj;
void * temp = malloc(size); assert(temp);
charObj = (uchar *) temp;
//printf("size = %d\n", size);
assert(fread(charObj, 1, size, input) == size);
*obj = new float[size];
for (int i = 0; i < size; ++i){
(*obj)[i] = charObj[i];
}
for (int i = 0; i < *nObj; ++i) {
for (int row = 0; row < 28 / 2; ++row) {
for (int col = 0; col < 28; ++col) {
swap((*obj)[i * D + row * 28 + col], (*obj)[i * D + (28 - row - 1) * 28 + col]);
}
}
}
free(charObj);
fclose(input);
assert(input = fopen((filename + "labels").c_str(), "rb"));
read4(&magic, input);
read4(nObj, input);
//assert(*nObj == 60000);
*membership = new uchar[*nObj];
fread(*membership, *nObj, 1, input);
//for (int i = 0; i < 10; ++i)
// printf("label of the %d-th = %u\n", i, (*membership)[i]);
fclose(input);
puts("READ SUCCESSFUL");
}
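// Squared Euclidean distance between two D-dimensional vectors; the sqrt is skipped since only comparisons are needed.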
__host__ __device__ inline float dist(float * v1, float * v2) {
float res = 0;
for (int i = 0; i < D; ++i) {
//if (v2[i] < v1[i]) res += (v1[i] - v2[i]) * (v1[i] - v2[i]);
res += (*(v2 + i) - *(v1 + i)) * (*(v2 + i) - *(v1 + i));
}
//printf("%.5f\n", sqrt(res));
return res;
}
void print(FILE * file, float * v) {
for (int i = 0; i < 28; ++i) {
for (int j = 0; j < 28; ++j) {
fprintf(file, "%0.f ", v[i * 28 + j]);
}
fprintf(file, "\n");
}
}
void writeBMP(float * img, string filename) {
FILE * tar = fopen(filename.c_str(), "wb");
char temp = 'B';
fwrite(&temp, 1, 1, tar);
temp = 'M';
fwrite(&temp, 1, 1, tar);
// 24bpp 28x28 image; each 28*3-byte row is already 4-byte aligned, so no padding is needed.
int sizeBMP = 28 * 28 * 3 + 2 + 4 + 4 + 4 + sizeof(DIBHeader);
fwrite(&sizeBMP, 4, 1, tar);
int reserved = 0;
fwrite(&reserved, 4, 1, tar);
int offset = 2 + 4 + 4 + 4 + sizeof(DIBHeader);
fwrite(&offset, 4, 1, tar);
DIBHeader dib(28, 28);
fwrite(&dib, sizeof(DIBHeader), 1, tar);
for (int i = 0; i < 28; ++i) {
for (int j = 0; j < 28; ++j) {
unsigned char x = img[i * 28 + j] + 0.5;
for (int k = 0; k < 3; ++k) {
fwrite(&x, 1, 1, tar);
}
}
}
fclose(tar);
}
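// CPU baseline k-means: alternate assignment (nearest cluster per image) and update (recompute cluster means) until the fraction of changed memberships drops to threshold or 500 iterations.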
void sequential(float * obj, int nObj, float * clusters, uchar * membership) {
int alpha;
int loops = 0;
float * sum = new float[CLUSTERS * D];
int * members = new int[CLUSTERS];
do{
for (int i = 0; i < CLUSTERS; ++i) {
members[i] = 0;
for (int k = 0; k < D; ++k) {
sum[i * D + k] = 0;
}
}
alpha = 0;
for (int i = 0; i < nObj; ++i) {
int minid = membership[i];
float mind = dist(obj + i * D, clusters + minid * D);
for (int j = 0; j < CLUSTERS; ++j) {
float x = dist(obj + i * D, clusters + j * D);
if (x - mind < -eps) {
mind = x;
minid = j;
}
}
if (membership[i] != minid) {
membership[i] = minid;
alpha++;
}
members[membership[i]] ++;
for (int k = 0; k < D; ++k) {
sum[membership[i] * D + k] += obj[i * D + k];
}
}
for (int i = 0; i < CLUSTERS; ++i) {
for (int k = 0; k < D; ++k) {
clusters[i * D + k] = sum[i * D + k] * 1. / members[i];
}
}
++loops;
printf("%d -> %.5f\n", loops, 1. * alpha / nObj);
} while (1. * alpha / nObj > threshold && loops < 500);
}
void check(uchar * actual, uchar * proposed, float * clusters, int nObj, int nTest, float *testObj, uchar * testActual) {
int * count = new int[CLUSTERS * CLUSTERS];
int * represents = new int[CLUSTERS];
memset(count, 0, CLUSTERS * CLUSTERS * sizeof(int));
for (int i = 0; i < nObj; ++i) {
count[proposed[i] * CLUSTERS + actual[i]]++;
}
for (int i = 0; i < CLUSTERS; ++i) {
int mx = -1;
for (int j = 0; j < CLUSTERS; ++j) {
if (mx == -1 || count[i * CLUSTERS + j] > count[i * CLUSTERS + mx]) {
mx = j;
}
}
represents[i] = mx;
}
/*
for (int i = 0; i < CLUSTERS; ++i) {
for (int j = 0; j < CLUSTERS; ++j) {
printf("%d\t", count[i * CLUSTERS + j]);
}
puts("");
}
*/
for (int i = 0; i < CLUSTERS; ++i) {
cout << i << " represents " << represents[i] << endl;
}
int wrong = 0;
for (int i = 0; i < nObj; ++i) {
wrong += actual[i] != represents[proposed[i]];
}
puts("----- On training -----");
printf("wrong %d out of %d\n", wrong, nObj);
printf("in percentage %.2f\n", 1. * wrong / nObj);
puts("----- On test ----");
wrong = 0;
for (int i = 0; i < nTest; ++i) {
int mycluster = -1;
float mind;
for (int j = 0; j < CLUSTERS; ++j) {
if (mycluster == -1 || dist(clusters + j * D, testObj + i * D) < mind) {
mind = dist(clusters + j * D, testObj + i * D);
mycluster = j;
}
}
wrong += testActual[i] != represents[mycluster];
}
printf("wrong %d out of %d\n", wrong, nTest);
printf("in percentage %.2f\n", 1. * wrong / nTest);
}
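// GPU assignment step: one thread per image, centroids read directly from global memory.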
__global__ void simpleFindCluster(float * obj, int nObj, float * clusters, uchar * membership) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < nObj) {
int x = membership[id];
float mind = dist(obj + id * D, clusters + x * D);
for (int j = 0; j < CLUSTERS; ++j) {
float candd = dist(obj + id * D, clusters + j * D);
if (mind > candd) {
mind = candd;
x = j;
}
}
membership[id] = x;
}
}
void simpleParallel(float * obj, int nObj, float * clusters, uchar * membership) {
int alpha = 0;
int loops = 0;
float * d_obj;
float * d_clusters;
uchar * d_membership, * temp;
temp = new uchar[nObj];
float * sum = new float[CLUSTERS * D];
int * members = new int[CLUSTERS];
cudaMalloc((void **)&d_obj, nObj * D * sizeof(float));
cudaMalloc((void**)&d_clusters, CLUSTERS * D * sizeof(float));
cudaMalloc((void**)&d_membership, nObj);
cudaMemcpy(d_obj, obj, nObj * D * sizeof(float), cudaMemcpyHostToDevice);
//cudaSuccess();
dim3 blocksPerGrid((nObj + tile - 1) / tile);
dim3 threadsPerBlock(tile);
do{
cudaMemcpy(d_clusters, clusters, CLUSTERS * D * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_membership, membership, nObj, cudaMemcpyHostToDevice);
simpleFindCluster << <blocksPerGrid, threadsPerBlock >> > (d_obj, nObj, d_clusters, d_membership);
cudaDeviceSynchronize();
cudaMemcpy(temp, d_membership, nObj, cudaMemcpyDeviceToHost);
//memset(sum, 0, CLUSTERS * D * sizeof(float));
//memset(members, 0, CLUSTERS * sizeof(int));
for (int i = 0; i < CLUSTERS; ++i) {
members[i] = 0;
for (int j = 0; j < D; ++j) {
sum[i * D + j] = 0.0;
}
}
alpha = 0;
for (int i = 0; i < nObj; ++i) {
alpha += temp[i] != membership[i];
membership[i] = temp[i];
members[membership[i]]++;
for (int j = 0; j < D; ++j) {
sum[membership[i] * D + j] += obj[i * D + j];
}
}
for (int i = 0; i < CLUSTERS; ++i) {
for (int j = 0; j < D; ++j) {
clusters[i * D + j] = sum[i * D + j] / members[i];
}
}
++loops;
printf("%d -> %.5f\n", loops, 1. * alpha / nObj);
} while (1. * alpha / nObj > threshold && loops < 500);
}
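// Tiled assignment kernel: centroids are staged into shared memory in chunks of
// clustersInShared to cut global-memory traffic during the distance scan.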
__global__ void tiledFindCluster(float * obj, int nObj, float * clusters, uchar * membership) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
float self[D];
for (int i = 0; i < D; ++i) {
self[i] = (id < nObj) ? obj[id * D + i] : 0;
}
__shared__ float sharedClusters[clustersInShared * D];
int x = (id < nObj) ? membership[id] : 0;
float mind = dist(self, clusters + x * D);
float candd;
int N;
for (int k = 0; k < (CLUSTERS - 1) / clustersInShared + 1; ++k) {
N = (CLUSTERS - k * clustersInShared < clustersInShared) ? CLUSTERS - k * clustersInShared : clustersInShared;
for (int i = 0; i < D; ++i) {
if(threadIdx.x < N) sharedClusters[threadIdx.x * D + i] = clusters[(k * clustersInShared + threadIdx.x) * D + i];
}
__syncthreads();
for (int j = 0; j < N; ++j) {
candd = dist(self, sharedClusters + j * D);
/*for (int f = 0; f < D; ++f) {
candd += (self[f] - sharedClusters[j * D + f]) * (self[f] - sharedClusters[j * D + f]);
}*/
if (mind > candd) {
mind = candd;
x = k * clustersInShared + j;
}
}
__syncthreads();
}
if(id < nObj) membership[id] = x;
}
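// Same host loop as simpleParallel, but launching the shared-memory tiled kernel.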
void tiledParallel(float * obj, int nObj, float * clusters, uchar * membership) {
int alpha = 0;
int loops = 0;
float * d_obj;
float * d_clusters;
uchar * d_membership, *temp;
temp = new uchar[nObj];
float * sum = new float[CLUSTERS * D];
int * members = new int[CLUSTERS];
cudaMalloc((void **)&d_obj, nObj * D * sizeof(float));
cudaMalloc((void**)&d_clusters, CLUSTERS * D * sizeof(float));
cudaMalloc((void**)&d_membership, nObj);
cudaMemcpy(d_obj, obj, nObj * D * sizeof(float), cudaMemcpyHostToDevice);
//cudaSuccess();
dim3 blocksPerGrid((nObj + tile - 1) / tile);
dim3 threadsPerBlock(tile);
do{
clock_t start = clock();
cudaMemcpy(d_clusters, clusters, CLUSTERS * D * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_membership, membership, nObj, cudaMemcpyHostToDevice);
tiledFindCluster << <blocksPerGrid, threadsPerBlock >> > (d_obj, nObj, d_clusters, d_membership);
cudaDeviceSynchronize();
cudaMemcpy(temp, d_membership, nObj, cudaMemcpyDeviceToHost);
//printf(" finding = %.3f s ", (clock() - start) * 1. / CLOCKS_PER_SEC);
start = clock();
//memset(sum, 0, CLUSTERS * D * sizeof(float));
//memset(members, 0, CLUSTERS * sizeof(int));
for (int i = 0; i < CLUSTERS; ++i) {
members[i] = 0;
for (int j = 0; j < D; ++j) {
sum[i * D + j] = 0.0;
}
}
alpha = 0;
for (int i = 0; i < nObj; ++i) {
alpha += temp[i] != membership[i];
membership[i] = temp[i];
members[membership[i]]++;
for (int j = 0; j < D; ++j) {
sum[membership[i] * D + j] += obj[i * D + j];
}
}
for (int i = 0; i < CLUSTERS; ++i) {
for (int j = 0; j < D; ++j) {
clusters[i * D + j] = sum[i * D + j] / members[i];
}
}
//printf("recalc = %.3f s\n", (clock() - start) * 1. / CLOCKS_PER_SEC);
++loops;
printf("membership change rate on %d-th iteration -> %.5f\n", loops, 1. * alpha / nObj);
} while (1. * alpha / nObj > threshold && loops < 500);
}
int main() {
srand(time(NULL));
int nObj;
float * obj;
uchar * actual;
int nTest;
float * testObj;
uchar * testActual;
read(&nObj, &obj, &actual, "");
read(&nTest, &testObj, &testActual, "test-");
//need to assign clusters initially
//TODO add kmeans++ assigning
float * initialClusters = new float[D * CLUSTERS];
int * temp = new int[nObj];
for (int i = 0; i < nObj; ++i) {
temp[i] = i;
}
random_shuffle(temp, temp + nObj);
for (int i = 0; i < CLUSTERS; ++i) {
int from = temp[i];
memcpy(initialClusters + i * D, obj + from * D, D * sizeof(float));
}
clock_t start;
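// hard-code the number of training objects used below (replaces the count returned by read)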
nObj = 60000;
/*----- Sequential--------*/
{
start = clock();
float * clusters = new float[D * CLUSTERS];
memcpy(clusters, initialClusters, D * CLUSTERS * sizeof(float));
uchar * membership = new uchar[nObj];
memset(membership, 0, nObj);
sequential(obj, nObj, clusters, membership);
check(actual, membership, clusters, nObj, nTest, testObj, testActual);
printf("Sequential Total execution time = %.2f\n", (clock() - start) * 1. / CLOCKS_PER_SEC);
}
/*----- End Sequential-----*/
/*------ Simple Parallel ------*/
{
start = clock();
float * clusters = new float[D * CLUSTERS];
memcpy(clusters, initialClusters, D * CLUSTERS * sizeof(float));
uchar * membership = new uchar[nObj];
memset(membership, 0, nObj);
simpleParallel(obj, nObj, clusters, membership);
check(actual, membership, clusters, nObj, nTest, testObj, testActual);
printf("Simple Parallel Total execution time = %.2f\n", (clock() - start) * 1. / CLOCKS_PER_SEC);
}
/*------ End Simple Parallel -----*/
/*------- Tiled Parallel ------*/
{
start = clock();
float * clusters = new float[D * CLUSTERS];
memcpy(clusters, initialClusters, D * CLUSTERS * sizeof(float));
uchar * membership = new uchar[nObj];
memset(membership, 0, nObj);
tiledParallel(obj, nObj, clusters, membership);
check(actual, membership, clusters, nObj, nTest, testObj, testActual);
printf("Tiled Parallel Total execution time = %.2f\n", (clock() - start) * 1. / CLOCKS_PER_SEC);
}
/*------- End Tiled Parallel ------*/
/*
for (int i = 0; i < CLUSTERS; ++i) {
string temp = to_string(i);
temp += ".bmp";
writeBMP(clusters + i * D, temp);
}
*/
/*for (int i = 0; i < 1000; ++i) {
cout << i << " is a member of " << (int)membership[i] << '\n';
}*/
system("PAUSE");
return 0;
}
|
d9f7486278dc01f1c7bb6f9e763cd4c5d6e0b63c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
 * This example shows how we can use multiple GPUs without having to copy the
 * data between the CPU and the GPU. To do that, we need to translate an address
 * in the CPU memory space into its equivalent in the GPU memory space.
*
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include "rf-time.h"
#include "book.h"
#define imin(a,b) (a<b?a:b)
#define N (33*1024*1024)
const int threadsPerBlock = 256;
const int blocksPerGrid = imin( 32, (N/2+threadsPerBlock-1) / threadsPerBlock );
__global__ void dot( int size, float *a, float *b, float *c ) {
__shared__ float cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp = 0;
while (tid < size) {
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex] = temp;
// synchronize threads in this block
__syncthreads();
// for reductions, threadsPerBlock must be a power of 2
// because of the following code
int i = blockDim.x/2;
while (i != 0) {
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
if( cacheIndex == 0 )
c[blockIdx.x] = cache[0];
}
struct DataStruct {
int deviceID;
int size;
int offset;
float *a;
float *b;
float returnValue;
};
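// Worker run once per GPU: maps the pinned host buffers into this device's address space
// and computes a partial dot product over its slice of the data.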
void* routine( void *pvoidData ) {
DataStruct *data = (DataStruct*)pvoidData;
if( data->deviceID != 0 ) {
hipSetDevice( data->deviceID );
hipSetDeviceFlags( hipDeviceMapHost );
}
int size = data->size;
float *h_a, *h_b, h_c, *h_partial_c;
float *d_a, *d_b, *d_partial_c;
// allocate memory on the CPU side
h_a = data->a;
h_b = data->b;
h_partial_c = (float*)malloc( blocksPerGrid*sizeof(float) );
// get device pointers for the mapped host buffers and allocate the partial-result buffer on the GPU
hipHostGetDevicePointer( &d_a, h_a, 0 );
hipHostGetDevicePointer( &d_b, h_b, 0 );
hipMalloc( (void**)&d_partial_c, blocksPerGrid*sizeof(float) );
// offset 'd_a' and 'd_b' to where this GPU gets its data
d_a += data->offset;
d_b += data->offset;
hipLaunchKernelGGL(( dot), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, size, d_a, d_b, d_partial_c );
// copy the array 'c' back from the GPU to the CPU
hipMemcpy( h_partial_c, d_partial_c, blocksPerGrid*sizeof(float), hipMemcpyDeviceToHost );
// finish up on the CPU side
h_c = 0;
for( int i = 0 ; i < blocksPerGrid ; i++ ) {
h_c += h_partial_c[i];
}
hipFree( d_partial_c );
// free memory on the CPU side
free( h_partial_c );
data->returnValue = h_c;
return 0;
}
int main( void ) {
int deviceCount;
hipGetDeviceCount( &deviceCount );
if( deviceCount < 2 ) {
printf( "We need at least two compute 1.0 or greater "
"devices, but only found %d\n", deviceCount );
return 0;
}
hipDeviceProp_t prop;
for( int i = 0 ; i < deviceCount ; i++ ) {
hipGetDeviceProperties( &prop, i );
if( prop.canMapHostMemory != 1 ) {
printf( "Device %d can not map memory.\n", i );
return 0;
}
}
float *a, *b;
hipSetDevice( 0 );
hipSetDeviceFlags( hipDeviceMapHost );
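// pinned, portable, mapped allocations: every GPU can read these host buffers directly
// (zero-copy), so a and b are never copied into device memory explicitly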
hipHostMalloc( (void**)&a, N*sizeof(float),
hipHostMallocWriteCombined | hipHostMallocPortable | hipHostMallocMapped );
hipHostMalloc( (void**)&b, N*sizeof(float),
hipHostMallocWriteCombined | hipHostMallocPortable | hipHostMallocMapped );
// fill in the host memory with data
for( int i = 0 ; i < N ; i++ ) {
a[i] = i;
b[i] = i*2;
}
// prepare for multithread
DataStruct data[deviceCount];
for( int i = 0 ; i < deviceCount ; i++ ) {
data[i].deviceID = i;
data[i].offset = i*N/deviceCount;
data[i].size = N/deviceCount;
data[i].a = a;
data[i].b = b;
}
CUTThread thread[deviceCount];
for( int i = 1 ; i < deviceCount ; i++ )
thread[i] = start_thread( routine, &(data[i]) );
routine( &(data[0]) );
for( int i = 1 ; i < deviceCount ; i++ )
end_thread( thread[i] );
// free memory on the CPU side
hipHostFree( a );
hipHostFree( b );
float resp = 0.0f;
for( int i = 0 ; i < deviceCount ; i++ )
resp += data[i].returnValue;
printf( "Value calculated: %f\n", resp );
return 0;
}
| d9f7486278dc01f1c7bb6f9e763cd4c5d6e0b63c.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
 * This example shows how we can use multiple GPUs without having to copy the
 * data between the CPU and the GPU. To do that, we need to translate an address
 * in the CPU memory space into its equivalent in the GPU memory space.
*
*/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include "rf-time.h"
#include "book.h"
#define imin(a,b) (a<b?a:b)
#define N (33*1024*1024)
const int threadsPerBlock = 256;
const int blocksPerGrid = imin( 32, (N/2+threadsPerBlock-1) / threadsPerBlock );
__global__ void dot( int size, float *a, float *b, float *c ) {
__shared__ float cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp = 0;
while (tid < size) {
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex] = temp;
// synchronize threads in this block
__syncthreads();
// for reductions, threadsPerBlock must be a power of 2
// because of the following code
int i = blockDim.x/2;
while (i != 0) {
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
if( cacheIndex == 0 )
c[blockIdx.x] = cache[0];
}
struct DataStruct {
int deviceID;
int size;
int offset;
float *a;
float *b;
float returnValue;
};
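// Worker run once per GPU: maps the pinned host buffers into this device's address space
// and computes a partial dot product over its slice of the data.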
void* routine( void *pvoidData ) {
DataStruct *data = (DataStruct*)pvoidData;
if( data->deviceID != 0 ) {
cudaSetDevice( data->deviceID );
cudaSetDeviceFlags( cudaDeviceMapHost );
}
int size = data->size;
float *h_a, *h_b, h_c, *h_partial_c;
float *d_a, *d_b, *d_partial_c;
// allocate memory on the CPU side
h_a = data->a;
h_b = data->b;
h_partial_c = (float*)malloc( blocksPerGrid*sizeof(float) );
// get device pointers for the mapped host buffers and allocate the partial-result buffer on the GPU
cudaHostGetDevicePointer( &d_a, h_a, 0 );
cudaHostGetDevicePointer( &d_b, h_b, 0 );
cudaMalloc( (void**)&d_partial_c, blocksPerGrid*sizeof(float) );
// offset 'd_a' and 'd_b' to where this GPU gets its data
d_a += data->offset;
d_b += data->offset;
dot<<<blocksPerGrid,threadsPerBlock>>>( size, d_a, d_b, d_partial_c );
// copy the array 'c' back from the GPU to the CPU
cudaMemcpy( h_partial_c, d_partial_c, blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost );
// finish up on the CPU side
h_c = 0;
for( int i = 0 ; i < blocksPerGrid ; i++ ) {
h_c += h_partial_c[i];
}
cudaFree( d_partial_c );
// free memory on the CPU side
free( h_partial_c );
data->returnValue = h_c;
return 0;
}
int main( void ) {
int deviceCount;
cudaGetDeviceCount( &deviceCount );
if( deviceCount < 2 ) {
printf( "We need at least two compute 1.0 or greater "
"devices, but only found %d\n", deviceCount );
return 0;
}
cudaDeviceProp prop;
for( int i = 0 ; i < deviceCount ; i++ ) {
cudaGetDeviceProperties( &prop, i );
if( prop.canMapHostMemory != 1 ) {
printf( "Device %d can not map memory.\n", i );
return 0;
}
}
float *a, *b;
cudaSetDevice( 0 );
cudaSetDeviceFlags( cudaDeviceMapHost );
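// pinned, portable, mapped allocations: every GPU can read these host buffers directly
// (zero-copy), so a and b are never copied into device memory explicitly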
cudaHostAlloc( (void**)&a, N*sizeof(float),
cudaHostAllocWriteCombined | cudaHostAllocPortable | cudaHostAllocMapped );
cudaHostAlloc( (void**)&b, N*sizeof(float),
cudaHostAllocWriteCombined | cudaHostAllocPortable | cudaHostAllocMapped );
// fill in the host memory with data
for( int i = 0 ; i < N ; i++ ) {
a[i] = i;
b[i] = i*2;
}
// prepare for multithread
DataStruct data[deviceCount];
for( int i = 0 ; i < deviceCount ; i++ ) {
data[i].deviceID = i;
data[i].offset = i*N/deviceCount;
data[i].size = N/deviceCount;
data[i].a = a;
data[i].b = b;
}
CUTThread thread[deviceCount];
for( int i = 1 ; i < deviceCount ; i++ )
thread[i] = start_thread( routine, &(data[i]) );
routine( &(data[0]) );
for( int i = 1 ; i < deviceCount ; i++ )
end_thread( thread[i] );
// free memory on the CPU side
cudaFreeHost( a );
cudaFreeHost( b );
float resp = 0.0f;
for( int i = 0 ; i < deviceCount ; i++ )
resp += data[i].returnValue;
printf( "Value calculated: %f\n", resp );
return 0;
}
|
960066be3fdfdde31d6b7a0bffcf8f2ebf73aed0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "energy_accumulation.hpp"
#include "flat_bottom_bond.hpp"
#include "gpu_utils.cuh"
#include "k_flat_bottom_bond.cuh"
#include "kernel_utils.cuh"
#include "math_utils.cuh"
#include <vector>
namespace timemachine {
template <typename RealType>
FlatBottomBond<RealType>::FlatBottomBond(const std::vector<int> &bond_idxs) : B_(bond_idxs.size() / 2) {
// validate bond_idxs: even length, all idxs non-negative, and no self-edges
if (bond_idxs.size() % 2 != 0) {
throw std::runtime_error("bond_idxs.size() must be exactly 2*k!");
}
for (int b = 0; b < B_; b++) {
auto src = bond_idxs[b * 2 + 0];
auto dst = bond_idxs[b * 2 + 1];
if (src == dst) {
throw std::runtime_error("src == dst");
}
if ((src < 0) or (dst < 0)) {
throw std::runtime_error("idxs must be non-negative");
}
}
// copy idxs to device
cudaSafeMalloc(&d_bond_idxs_, B_ * 2 * sizeof(*d_bond_idxs_));
gpuErrchk(hipMemcpy(d_bond_idxs_, &bond_idxs[0], B_ * 2 * sizeof(*d_bond_idxs_), hipMemcpyHostToDevice));
cudaSafeMalloc(&d_u_buffer_, B_ * sizeof(*d_u_buffer_));
};
template <typename RealType> FlatBottomBond<RealType>::~FlatBottomBond() {
gpuErrchk(hipFree(d_bond_idxs_));
gpuErrchk(hipFree(d_u_buffer_));
};
template <typename RealType>
void FlatBottomBond<RealType>::execute_device(
const int N,
const int P,
const double *d_x,
const double *d_p,
const double *d_box,
unsigned long long *d_du_dx,
unsigned long long *d_du_dp,
__int128 *d_u,
hipStream_t stream) {
const int num_params_per_bond = 3;
int expected_P = num_params_per_bond * B_;
if (P != expected_P) {
throw std::runtime_error(
"FlatBottomBond::execute_device(): expected P == " + std::to_string(expected_P) +
", got P=" + std::to_string(P));
}
if (B_ > 0) {
const int tpb = DEFAULT_THREADS_PER_BLOCK;
const int blocks = ceil_divide(B_, tpb);
hipLaunchKernelGGL(( k_flat_bottom_bond<RealType>), dim3(blocks), dim3(tpb), 0, stream,
B_, d_x, d_box, d_p, d_bond_idxs_, d_du_dx, d_du_dp, d_u == nullptr ? nullptr : d_u_buffer_);
gpuErrchk(hipPeekAtLastError());
if (d_u) {
accumulate_energy(B_, d_u_buffer_, d_u, stream);
}
}
};
template <typename RealType>
void FlatBottomBond<RealType>::set_bonds_device(const int num_bonds, const int *d_bonds, const hipStream_t stream) {
gpuErrchk(hipMemcpyAsync(
d_bond_idxs_, d_bonds, num_bonds * 2 * sizeof(*d_bond_idxs_), hipMemcpyDeviceToDevice, stream));
B_ = num_bonds;
}
template class FlatBottomBond<double>;
template class FlatBottomBond<float>;
} // namespace timemachine
| 960066be3fdfdde31d6b7a0bffcf8f2ebf73aed0.cu | #include "energy_accumulation.hpp"
#include "flat_bottom_bond.hpp"
#include "gpu_utils.cuh"
#include "k_flat_bottom_bond.cuh"
#include "kernel_utils.cuh"
#include "math_utils.cuh"
#include <vector>
namespace timemachine {
template <typename RealType>
FlatBottomBond<RealType>::FlatBottomBond(const std::vector<int> &bond_idxs) : B_(bond_idxs.size() / 2) {
// validate bond_idxs: even length, all idxs non-negative, and no self-edges
if (bond_idxs.size() % 2 != 0) {
throw std::runtime_error("bond_idxs.size() must be exactly 2*k!");
}
for (int b = 0; b < B_; b++) {
auto src = bond_idxs[b * 2 + 0];
auto dst = bond_idxs[b * 2 + 1];
if (src == dst) {
throw std::runtime_error("src == dst");
}
if ((src < 0) or (dst < 0)) {
throw std::runtime_error("idxs must be non-negative");
}
}
// copy idxs to device
cudaSafeMalloc(&d_bond_idxs_, B_ * 2 * sizeof(*d_bond_idxs_));
gpuErrchk(cudaMemcpy(d_bond_idxs_, &bond_idxs[0], B_ * 2 * sizeof(*d_bond_idxs_), cudaMemcpyHostToDevice));
cudaSafeMalloc(&d_u_buffer_, B_ * sizeof(*d_u_buffer_));
};
template <typename RealType> FlatBottomBond<RealType>::~FlatBottomBond() {
gpuErrchk(cudaFree(d_bond_idxs_));
gpuErrchk(cudaFree(d_u_buffer_));
};
template <typename RealType>
void FlatBottomBond<RealType>::execute_device(
const int N,
const int P,
const double *d_x,
const double *d_p,
const double *d_box,
unsigned long long *d_du_dx,
unsigned long long *d_du_dp,
__int128 *d_u,
cudaStream_t stream) {
const int num_params_per_bond = 3;
int expected_P = num_params_per_bond * B_;
if (P != expected_P) {
throw std::runtime_error(
"FlatBottomBond::execute_device(): expected P == " + std::to_string(expected_P) +
", got P=" + std::to_string(P));
}
if (B_ > 0) {
const int tpb = DEFAULT_THREADS_PER_BLOCK;
const int blocks = ceil_divide(B_, tpb);
k_flat_bottom_bond<RealType><<<blocks, tpb, 0, stream>>>(
B_, d_x, d_box, d_p, d_bond_idxs_, d_du_dx, d_du_dp, d_u == nullptr ? nullptr : d_u_buffer_);
gpuErrchk(cudaPeekAtLastError());
if (d_u) {
accumulate_energy(B_, d_u_buffer_, d_u, stream);
}
}
};
template <typename RealType>
void FlatBottomBond<RealType>::set_bonds_device(const int num_bonds, const int *d_bonds, const cudaStream_t stream) {
gpuErrchk(cudaMemcpyAsync(
d_bond_idxs_, d_bonds, num_bonds * 2 * sizeof(*d_bond_idxs_), cudaMemcpyDeviceToDevice, stream));
B_ = num_bonds;
}
template class FlatBottomBond<double>;
template class FlatBottomBond<float>;
} // namespace timemachine
|
versione1.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
void error(char const *str)
{
fprintf(stderr, "%s\n", str);
exit(1);
}
void cuda_check(hipError_t err, char const *str)
{
if (err != hipSuccess) {
fprintf(stderr, "%s: CUDA error %d (%s)\n",
str, err, hipGetErrorString(err));
}
}
__global__
void init_vec(int nels, float* __restrict__ d_vec1)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
d_vec1[i] = i;
}
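// Emits one partial product per thread for a matrix product; reduction_row later sums
// each row of partial products into a single entry of the result.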
__global__
void multi_vec(int n_row1,int n_col1,int n_row2,int n_col2,float* __restrict__ res_vec,float* __restrict__ d_vec1,float* __restrict__ d_vec2)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int c= blockIdx.x*n_row1 + (threadIdx.x)%n_col1;
int j= ((int)(threadIdx.x/n_row2) + (threadIdx.x%n_row2)*n_col2);
res_vec[i]=d_vec1[c]*d_vec2[j];
}
__global__
void scalareMatrice(float* __restrict__ res_vec,float scalar,float* __restrict__ d_vec)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
res_vec[i]=d_vec[i]*scalar;
}
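// Row reduction: sums a fixed window of 32 consecutive partial products into res_vec.
// Note the hard-coded i%N==31 test, so the reduction is only complete when N == 32.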
__global__
void reduction_row(int N,float* __restrict__ res_vec,float* __restrict__ d_vec1)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int idx=(int)(i/N);
float c =res_vec[idx];
float d =d_vec1[i];
if(i%N==31){
res_vec[idx]=d_vec1[i-31]+d_vec1[i-30]+d_vec1[i-29]+d_vec1[i-28]+d_vec1[i-27]+d_vec1[i-26]+d_vec1[i-25]+d_vec1[i-24]+
d_vec1[i-23]+d_vec1[i-22]+d_vec1[i-21]+d_vec1[i-20]+d_vec1[i-19]+d_vec1[i-18]+d_vec1[i-17]+d_vec1[i-16]+
d_vec1[i-15]+d_vec1[i-14]+d_vec1[i-13]+d_vec1[i-12]+d_vec1[i-11]+d_vec1[i-10]+d_vec1[i-9]+d_vec1[i-8]+
d_vec1[i-7]+d_vec1[i-6]+d_vec1[i-5]+d_vec1[i-4]+d_vec1[i-3]+d_vec1[i-2]+d_vec1[i-1]+d_vec1[i];
}
}
__global__
void transpose(int nrow,int ncols, float* __restrict__ res_vec, float* __restrict__ d_vec1)
{
int c = threadIdx.x;
int r=blockIdx.x;
int l_in = r*ncols + c;
int l_out = c * nrow + r;
res_vec[l_out] = d_vec1[l_in];
}
__global__
void vecsum(int nels, float* __restrict__ res_vec, float* __restrict__ d_vec1, float* __restrict__ d_vec2)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
res_vec[i] = d_vec1[i]+d_vec2[i];
}
__global__
void vecdif(int nels, float* __restrict__ res_vec, float* __restrict__ d_vec1, float* __restrict__ d_vec2)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
res_vec[i] = d_vec1[i]-d_vec2[i];
}
void stampa(float* matrice,int m){
int i,j;
printf("\n");
for(i=0;i<m;i++){
printf("%f ",matrice[i]);
printf("\n");
}
}
int main(int argc, char *argv[]){
float* matriceA;
float* matriceB;
float* matriceX;
float* pk;
float* trasposta;
float* prodotto;
float* somma;
float* res;
float* den;
float* res0;
float* res1;
float* res2;
float* red_den;
float* matrice;
float* scalar;
float* num;
float* deno;
float ak;
int nels;
printf("%d\n",argc );
if (argc != 2) {
error("syntax: serve N come arg");
}
int N = atoi(argv[1]);
if (N < 0) {
error("N < 0");
}
int M=1;
nels=N*N;
size_t memsize = nels*sizeof(float);
hipError_t err;
err = hipMalloc((void**)&matriceA, memsize);
cuda_check(err, "alloc matriceA");
err = hipMalloc((void**)&matriceB, N*M*sizeof(float));
cuda_check(err, "alloc matriceB");
err = hipMalloc((void**)&matriceX, N*sizeof(float));
cuda_check(err, "alloc matriceX");
err = hipHostMalloc(&matrice, N*N*sizeof(float));
cuda_check(err, "alloc matrice");
err = hipHostMalloc(&num, M*sizeof(float));
cuda_check(err, "alloc matrice");
err = hipHostMalloc(&deno, M*sizeof(float));
cuda_check(err, "alloc matrice");
err = hipMalloc((void**)&somma,nels*M*sizeof(float));
cuda_check(err, "alloc somma");
err = hipMalloc((void**)&res,M*N*N*sizeof(float));
cuda_check(err, "alloc res");
err = hipMalloc((void**)&res0,M*N*sizeof(float));
cuda_check(err, "alloc res0");
err = hipMalloc((void**)&prodotto,M*N*N*sizeof(float));
cuda_check(err, "alloc prodotto");
err = hipMalloc((void**)&res1,M*N*sizeof(float));
cuda_check(err, "alloc res1");
err = hipMalloc((void**)&res2,M*N*sizeof(float));
cuda_check(err, "alloc res2");
err = hipMalloc((void**)&pk,M*N*sizeof(float));
cuda_check(err, "alloc pk");
err = hipMalloc((void**)&trasposta,M*N*sizeof(float));
cuda_check(err, "alloc trasposta ");
err = hipMalloc((void**)&den,M*N*sizeof(float));
cuda_check(err, "alloc den");
err = hipMalloc((void**)&red_den,M*sizeof(float));
cuda_check(err, "alloc den");
err = hipMalloc((void**)&scalar,M*N*sizeof(float));
cuda_check(err, "alloc scalar");
hipEvent_t pre_init, post_init, pre_sum, post_sum, pre_red, post_red,pre_prodotto,post_prodotto,
pre_transpose,post_transpose,pre_scalar_matrice,post_scalar_matrice,pre_vecsum,post_vecsum,
pre_vecdif,post_vecdif;
err = hipEventCreate(&pre_init, 0);
cuda_check(err, "create pre_init");
err = hipEventCreate(&pre_red, 0);
cuda_check(err, "create pre_red");
err = hipEventCreate(&pre_prodotto, 0);
cuda_check(err, "create pre_sum");
err = hipEventCreate(&pre_transpose, 0);
cuda_check(err, "create pre_traspose");
err = hipEventCreate(&pre_scalar_matrice, 0);
cuda_check(err, "create pre_scalar_matrice");
err = hipEventCreate(&pre_vecdif, 0);
cuda_check(err, "create pre_vecdif");
err = hipEventCreate(&pre_vecsum, 0);
cuda_check(err, "create pre_vecsum");
err = hipEventCreate(&post_init, 0);
cuda_check(err, "create post_init");
err = hipEventCreate(&post_red, 0);
cuda_check(err, "create post_red");
err = hipEventCreate(&post_prodotto, 0);
cuda_check(err, "create post_sum");
err = hipEventCreate(&post_transpose, 0);
cuda_check(err, "create post_traspose");
err = hipEventCreate(&post_scalar_matrice, 0);
cuda_check(err, "create post_scalar_matrice");
err = hipEventCreate(&post_vecdif, 0);
cuda_check(err, "create post_vecdif");
err = hipEventCreate(&post_vecsum, 0);
cuda_check(err, "create post_vecsum");
hipEventRecord(pre_init);
hipLaunchKernelGGL(( init_vec), dim3(N), dim3(N), 0, 0, nels, matriceA);
hipEventRecord(post_init);
hipLaunchKernelGGL(( init_vec), dim3(1), dim3(M*N), 0, 0, M*N, matriceB);
hipLaunchKernelGGL(( init_vec), dim3(1), dim3(M*N), 0, 0, M*N, matriceX);
int i;
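// one steepest-descent style iteration for A x = b:
// pk = b - A*x, ak = (pk^T pk) / (pk^T A pk), x = x + ak*pk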
for(i=0;i<1;i++){
hipEventRecord(pre_prodotto);
hipLaunchKernelGGL(( multi_vec), dim3(N), dim3(M*N), 0, 0, N,N,N,M,somma,matriceA,matriceX);
hipEventRecord(post_prodotto);
hipEventRecord(pre_red);
hipLaunchKernelGGL(( reduction_row), dim3(N), dim3(M*N), 0, 0, N,res0,somma);
hipEventRecord(post_red);
hipEventRecord(pre_vecdif);
hipLaunchKernelGGL(( vecdif), dim3(N),dim3(M), 0, 0, N*M,pk,matriceB,res0);
hipEventRecord(post_vecdif);
hipEventRecord(pre_transpose);
hipLaunchKernelGGL(( transpose), dim3(N),dim3(M), 0, 0, N,M,trasposta,pk);
hipEventRecord(post_transpose);
hipLaunchKernelGGL(( multi_vec), dim3(M), dim3(N), 0, 0, M,N,N,M,prodotto,trasposta,pk);
hipLaunchKernelGGL(( reduction_row), dim3(M), dim3(N), 0, 0, N,res1,prodotto);
hipLaunchKernelGGL(( multi_vec), dim3(M), dim3(M*N*N), 0, 0, M,N,N,N,res,trasposta,matriceA);
hipLaunchKernelGGL(( reduction_row), dim3(M*N), dim3(N), 0, 0, N,res2,res);
hipLaunchKernelGGL(( multi_vec), dim3(N), dim3(M*N), 0, 0, M,N,N,M,den,res2,pk);
hipLaunchKernelGGL(( reduction_row), dim3(N), dim3(M*N), 0, 0, N,red_den,den);
err = hipMemcpy(num, res1, 1*sizeof(float), hipMemcpyDeviceToHost);
err = hipMemcpy(deno, red_den, 1*sizeof(float), hipMemcpyDeviceToHost);
ak=num[0]/deno[0];
hipEventRecord(pre_scalar_matrice);
hipLaunchKernelGGL(( scalareMatrice), dim3(N), dim3(M), 0, 0, scalar,ak,pk);
hipEventRecord(post_scalar_matrice);
hipEventRecord(pre_vecsum);
hipLaunchKernelGGL(( vecsum), dim3(N), dim3(M), 0, 0, N*M*N,matriceX,matriceX,scalar);
hipEventRecord(post_vecsum);
err = hipMemcpy(matrice, matriceX, M*N*sizeof(float), hipMemcpyDeviceToHost);
cuda_check(err, "create mem");
stampa(matrice,M*N);
float runtime_init_ms, runtime_prodotto_ms, runtime_red_ms,runtime_transpose_ms,runtime_scalar_matrice_ms,
runtime_vecdif_ms,runtime_vecsum_ms;
err = hipEventElapsedTime(&runtime_init_ms, pre_init, post_init);
cuda_check(err, "elapsed time init");
err = hipEventElapsedTime(&runtime_prodotto_ms, pre_prodotto, post_prodotto);
cuda_check(err, "elapsed time prodotto");
err = hipEventElapsedTime(&runtime_red_ms, pre_red, post_red);
cuda_check(err, "elapsed time reduction");
err = hipEventElapsedTime(&runtime_transpose_ms, pre_transpose, post_transpose);
cuda_check(err, "elapsed time traspose");
err = hipEventElapsedTime(&runtime_scalar_matrice_ms, pre_scalar_matrice, post_scalar_matrice);
cuda_check(err, "elapsed time scalar_matrice");
err = hipEventElapsedTime(&runtime_vecdif_ms, pre_vecdif, post_vecdif);
cuda_check(err, "elapsed time vecdif");
err = hipEventElapsedTime(&runtime_vecsum_ms, pre_vecsum, post_vecsum);
cuda_check(err, "elapsed time vecsum");
printf("init: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_init_ms, nels/runtime_init_ms/1.0e6, memsize/runtime_init_ms/1.0e6);
printf("prodotto: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_prodotto_ms, nels/runtime_prodotto_ms/1.0e6, memsize/runtime_prodotto_ms/1.0e6);
printf("reduction: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_red_ms, nels/runtime_red_ms/1.0e6, memsize/runtime_red_ms/1.0e6);
printf("transpose: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_transpose_ms, N/runtime_transpose_ms/1.0e6, (N*sizeof(float))/runtime_transpose_ms/1.0e6);
printf("scalareMatrice: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_scalar_matrice_ms, N/runtime_scalar_matrice_ms/1.0e6, (N*sizeof(float))/runtime_scalar_matrice_ms/1.0e6);
printf("vecdif: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_vecdif_ms, N/runtime_vecdif_ms/1.0e6, (N*sizeof(float))/runtime_vecdif_ms/1.0e6);
printf("vecsum: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_vecsum_ms, N/runtime_vecsum_ms/1.0e6, (N*sizeof(float))/runtime_vecsum_ms/1.0e6);
}
hipFree(matriceA);
hipHostFree(matrice);
hipFree(somma);
hipFree(res);
hipFree(pk);
hipFree(trasposta);
hipFree(prodotto);
hipFree(den);
hipFree(res0);
hipFree(res1);
hipFree(res2);
hipFree(red_den);
hipFree(scalar);
hipFree(matriceB);
hipFree(matriceX);
hipHostFree(num);
hipHostFree(deno);
}
| versione1.cu | #import <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
void error(char const *str)
{
fprintf(stderr, "%s\n", str);
exit(1);
}
void cuda_check(cudaError_t err, char const *str)
{
if (err != cudaSuccess) {
fprintf(stderr, "%s: CUDA error %d (%s)\n",
str, err, cudaGetErrorString(err));
}
}
__global__
void init_vec(int nels, float* __restrict__ d_vec1)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
d_vec1[i] = i;
}
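// Emits one partial product per thread for a matrix product; reduction_row later sums
// each row of partial products into a single entry of the result.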
__global__
void multi_vec(int n_row1,int n_col1,int n_row2,int n_col2,float* __restrict__ res_vec,float* __restrict__ d_vec1,float* __restrict__ d_vec2)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int c= blockIdx.x*n_row1 + (threadIdx.x)%n_col1;
int j= ((int)(threadIdx.x/n_row2) + (threadIdx.x%n_row2)*n_col2);
res_vec[i]=d_vec1[c]*d_vec2[j];
}
__global__
void scalareMatrice(float* __restrict__ res_vec,float scalar,float* __restrict__ d_vec)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
res_vec[i]=d_vec[i]*scalar;
}
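// Row reduction: sums a fixed window of 32 consecutive partial products into res_vec.
// Note the hard-coded i%N==31 test, so the reduction is only complete when N == 32.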
__global__
void reduction_row(int N,float* __restrict__ res_vec,float* __restrict__ d_vec1)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int idx=(int)(i/N);
float c =res_vec[idx];
float d =d_vec1[i];
if(i%N==31){
res_vec[idx]=d_vec1[i-31]+d_vec1[i-30]+d_vec1[i-29]+d_vec1[i-28]+d_vec1[i-27]+d_vec1[i-26]+d_vec1[i-25]+d_vec1[i-24]+
d_vec1[i-23]+d_vec1[i-22]+d_vec1[i-21]+d_vec1[i-20]+d_vec1[i-19]+d_vec1[i-18]+d_vec1[i-17]+d_vec1[i-16]+
d_vec1[i-15]+d_vec1[i-14]+d_vec1[i-13]+d_vec1[i-12]+d_vec1[i-11]+d_vec1[i-10]+d_vec1[i-9]+d_vec1[i-8]+
d_vec1[i-7]+d_vec1[i-6]+d_vec1[i-5]+d_vec1[i-4]+d_vec1[i-3]+d_vec1[i-2]+d_vec1[i-1]+d_vec1[i];
}
}
__global__
void transpose(int nrow,int ncols, float* __restrict__ res_vec, float* __restrict__ d_vec1)
{
int c = threadIdx.x;
int r=blockIdx.x;
int l_in = r*ncols + c;
int l_out = c * nrow + r;
res_vec[l_out] = d_vec1[l_in];
}
__global__
void vecsum(int nels, float* __restrict__ res_vec, float* __restrict__ d_vec1, float* __restrict__ d_vec2)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
res_vec[i] = d_vec1[i]+d_vec2[i];
}
__global__
void vecdif(int nels, float* __restrict__ res_vec, float* __restrict__ d_vec1, float* __restrict__ d_vec2)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
res_vec[i] = d_vec1[i]-d_vec2[i];
}
void stampa(float* matrice,int m){
int i,j;
printf("\n");
for(i=0;i<m;i++){
printf("%f ",matrice[i]);
printf("\n");
}
}
int main(int argc, char *argv[]){
float* matriceA;
float* matriceB;
float* matriceX;
float* pk;
float* trasposta;
float* prodotto;
float* somma;
float* res;
float* den;
float* res0;
float* res1;
float* res2;
float* red_den;
float* matrice;
float* scalar;
float* num;
float* deno;
float ak;
int nels;
printf("%d\n",argc );
if (argc != 2) {
error("syntax: serve N come arg");
}
int N = atoi(argv[1]);
if (N < 0) {
error("N < 0");
}
int M=1;
nels=N*N;
size_t memsize = nels*sizeof(float);
cudaError_t err;
err = cudaMalloc((void**)&matriceA, memsize);
cuda_check(err, "alloc matriceA");
err = cudaMalloc((void**)&matriceB, N*M*sizeof(float));
cuda_check(err, "alloc matriceB");
err = cudaMalloc((void**)&matriceX, N*sizeof(float));
cuda_check(err, "alloc matriceX");
err = cudaMallocHost(&matrice, N*N*sizeof(float));
cuda_check(err, "alloc matrice");
err = cudaMallocHost(&num, M*sizeof(float));
cuda_check(err, "alloc matrice");
err = cudaMallocHost(&deno, M*sizeof(float));
cuda_check(err, "alloc matrice");
err = cudaMalloc((void**)&somma,nels*M*sizeof(float));
cuda_check(err, "alloc somma");
err = cudaMalloc((void**)&res,M*N*N*sizeof(float));
cuda_check(err, "alloc res");
err = cudaMalloc((void**)&res0,M*N*sizeof(float));
cuda_check(err, "alloc res0");
err = cudaMalloc((void**)&prodotto,M*N*N*sizeof(float));
cuda_check(err, "alloc prodotto");
err = cudaMalloc((void**)&res1,M*N*sizeof(float));
cuda_check(err, "alloc res1");
err = cudaMalloc((void**)&res2,M*N*sizeof(float));
cuda_check(err, "alloc res2");
err = cudaMalloc((void**)&pk,M*N*sizeof(float));
cuda_check(err, "alloc pk");
err = cudaMalloc((void**)&trasposta,M*N*sizeof(float));
cuda_check(err, "alloc trasposta ");
err = cudaMalloc((void**)&den,M*N*sizeof(float));
cuda_check(err, "alloc den");
err = cudaMalloc((void**)&red_den,M*sizeof(float));
cuda_check(err, "alloc den");
err = cudaMalloc((void**)&scalar,M*N*sizeof(float));
cuda_check(err, "alloc scalar");
cudaEvent_t pre_init, post_init, pre_sum, post_sum, pre_red, post_red,pre_prodotto,post_prodotto,
pre_transpose,post_transpose,pre_scalar_matrice,post_scalar_matrice,pre_vecsum,post_vecsum,
pre_vecdif,post_vecdif;
err = cudaEventCreate(&pre_init, 0);
cuda_check(err, "create pre_init");
err = cudaEventCreate(&pre_red, 0);
cuda_check(err, "create pre_red");
err = cudaEventCreate(&pre_prodotto, 0);
cuda_check(err, "create pre_sum");
err = cudaEventCreate(&pre_transpose, 0);
cuda_check(err, "create pre_traspose");
err = cudaEventCreate(&pre_scalar_matrice, 0);
cuda_check(err, "create pre_scalar_matrice");
err = cudaEventCreate(&pre_vecdif, 0);
cuda_check(err, "create pre_vecdif");
err = cudaEventCreate(&pre_vecsum, 0);
cuda_check(err, "create pre_vecsum");
err = cudaEventCreate(&post_init, 0);
cuda_check(err, "create post_init");
err = cudaEventCreate(&post_red, 0);
cuda_check(err, "create post_red");
err = cudaEventCreate(&post_prodotto, 0);
cuda_check(err, "create post_sum");
err = cudaEventCreate(&post_transpose, 0);
cuda_check(err, "create post_traspose");
err = cudaEventCreate(&post_scalar_matrice, 0);
cuda_check(err, "create post_scalar_matrice");
err = cudaEventCreate(&post_vecdif, 0);
cuda_check(err, "create post_vecdif");
err = cudaEventCreate(&post_vecsum, 0);
cuda_check(err, "create post_vecsum");
cudaEventRecord(pre_init);
init_vec<<<N, N>>>(nels, matriceA);
cudaEventRecord(post_init);
init_vec<<<1, M*N>>>(M*N, matriceB);
init_vec<<<1, M*N>>>(M*N, matriceX);
int i;
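// one steepest-descent style iteration for A x = b:
// pk = b - A*x, ak = (pk^T pk) / (pk^T A pk), x = x + ak*pk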
for(i=0;i<1;i++){
cudaEventRecord(pre_prodotto);
multi_vec<<<N, M*N>>>(N,N,N,M,somma,matriceA,matriceX);
cudaEventRecord(post_prodotto);
cudaEventRecord(pre_red);
reduction_row<<<N, M*N>>>(N,res0,somma);
cudaEventRecord(post_red);
cudaEventRecord(pre_vecdif);
vecdif<<<N,M>>>(N*M,pk,matriceB,res0);
cudaEventRecord(post_vecdif);
cudaEventRecord(pre_transpose);
transpose<<<N,M>>>(N,M,trasposta,pk);
cudaEventRecord(post_transpose);
multi_vec<<<M, N>>>(M,N,N,M,prodotto,trasposta,pk);
reduction_row<<<M, N>>>(N,res1,prodotto);
multi_vec<<<M, M*N*N>>>(M,N,N,N,res,trasposta,matriceA);
reduction_row<<<M*N, N>>>(N,res2,res);
multi_vec<<<N, M*N>>>(M,N,N,M,den,res2,pk);
reduction_row<<<N, M*N>>>(N,red_den,den);
err = cudaMemcpy(num, res1, 1*sizeof(float), cudaMemcpyDeviceToHost);
err = cudaMemcpy(deno, red_den, 1*sizeof(float), cudaMemcpyDeviceToHost);
ak=num[0]/deno[0];
cudaEventRecord(pre_scalar_matrice);
scalareMatrice<<<N, M>>>(scalar,ak,pk);
cudaEventRecord(post_scalar_matrice);
cudaEventRecord(pre_vecsum);
vecsum<<<N, M>>>(N*M*N,matriceX,matriceX,scalar);
cudaEventRecord(post_vecsum);
err = cudaMemcpy(matrice, matriceX, M*N*sizeof(float), cudaMemcpyDeviceToHost);
cuda_check(err, "create mem");
stampa(matrice,M*N);
float runtime_init_ms, runtime_prodotto_ms, runtime_red_ms,runtime_transpose_ms,runtime_scalar_matrice_ms,
runtime_vecdif_ms,runtime_vecsum_ms;
err = cudaEventElapsedTime(&runtime_init_ms, pre_init, post_init);
cuda_check(err, "elapsed time init");
err = cudaEventElapsedTime(&runtime_prodotto_ms, pre_prodotto, post_prodotto);
cuda_check(err, "elapsed time prodotto");
err = cudaEventElapsedTime(&runtime_red_ms, pre_red, post_red);
cuda_check(err, "elapsed time reduction");
err = cudaEventElapsedTime(&runtime_transpose_ms, pre_transpose, post_transpose);
cuda_check(err, "elapsed time traspose");
err = cudaEventElapsedTime(&runtime_scalar_matrice_ms, pre_scalar_matrice, post_scalar_matrice);
cuda_check(err, "elapsed time scalar_matrice");
err = cudaEventElapsedTime(&runtime_vecdif_ms, pre_vecdif, post_vecdif);
cuda_check(err, "elapsed time vecdif");
err = cudaEventElapsedTime(&runtime_vecsum_ms, pre_vecsum, post_vecsum);
cuda_check(err, "elapsed time vecsum");
printf("init: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_init_ms, nels/runtime_init_ms/1.0e6, memsize/runtime_init_ms/1.0e6);
printf("prodotto: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_prodotto_ms, nels/runtime_prodotto_ms/1.0e6, memsize/runtime_prodotto_ms/1.0e6);
printf("reduction: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_red_ms, nels/runtime_red_ms/1.0e6, memsize/runtime_red_ms/1.0e6);
printf("transpose: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_transpose_ms, N/runtime_transpose_ms/1.0e6, (N*sizeof(float))/runtime_transpose_ms/1.0e6);
printf("scalareMatrice: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_scalar_matrice_ms, N/runtime_scalar_matrice_ms/1.0e6, (N*sizeof(float))/runtime_scalar_matrice_ms/1.0e6);
printf("vecdif: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_vecdif_ms, N/runtime_vecdif_ms/1.0e6, (N*sizeof(float))/runtime_vecdif_ms/1.0e6);
printf("vecsum: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_vecsum_ms, N/runtime_vecsum_ms/1.0e6, (N*sizeof(float))/runtime_vecsum_ms/1.0e6);
}
cudaFree(matriceA);
cudaFreeHost(matrice);
cudaFree(somma);
cudaFree(res);
cudaFree(pk);
cudaFree(trasposta);
cudaFree(prodotto);
cudaFree(den);
cudaFree(res0);
cudaFree(res1);
cudaFree(res2);
cudaFree(red_den);
cudaFree(scalar);
cudaFree(matriceB);
cudaFree(matriceX);
cudaFreeHost(num);
cudaFreeHost(deno);
}
|